diff --git a/.travis.yml b/.travis.yml index 22eea20c727e..c5c032c290b8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,13 +4,13 @@ language: python python: 3.8 cache: pip before_install: pip install --upgrade pip setuptools six -install: pip install -r requirements.txt +install: pip install black flake8 before_script: - - black --check . || true - - IGNORE=E123,E203,E265,E266,E302,E401,E402,E712,E731,E741,E743,F811,F841,W291,W293,W503 - - flake8 . --count --ignore=$IGNORE --max-complexity=25 --max-line-length=127 --show-source --statistics -script: + - black --check . + - flake8 --ignore=E203,W503 --max-complexity=25 --max-line-length=120 --statistics --count . - scripts/validate_filenames.py # no uppercase, no spaces, in a directory + - pip install -r requirements.txt # fast fail on black, flake8, validate_filenames +script: - mypy --ignore-missing-imports . - pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=. . after_success: diff --git a/DIRECTORY.md b/DIRECTORY.md index aea74f9255f9..2bb18897044f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -222,6 +222,7 @@ * [Bellman Ford](https://github.com/TheAlgorithms/Python/blob/master/graphs/bellman_ford.py) * [Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs.py) * [Bfs Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_shortest_path.py) + * [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py) * [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py) * [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py) * [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py) @@ -242,6 +243,7 @@ * [Graph List](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_list.py) * [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py) * [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py) + * [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py) * [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py) * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py) * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py) @@ -409,6 +411,7 @@ * [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py) * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/other/frequency_finder.py) * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py) + * [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py) * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py) * [Integeration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/other/integeration_by_simpson_approx.py) * [Largest Subarray 
Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py) diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py index d91b9709f3d6..d32e3efbd1f2 100644 --- a/arithmetic_analysis/newton_forward_interpolation.py +++ b/arithmetic_analysis/newton_forward_interpolation.py @@ -2,6 +2,7 @@ import math + # for calculating u value def ucal(u, p): """ diff --git a/backtracking/coloring.py b/backtracking/coloring.py index 77beb5fc1956..3956b21a9182 100644 --- a/backtracking/coloring.py +++ b/backtracking/coloring.py @@ -18,7 +18,7 @@ def valid_coloring( >>> neighbours = [0,1,0,1,0] >>> colored_vertices = [0, 2, 1, 2, 0] - + >>> color = 1 >>> valid_coloring(neighbours, colored_vertices, color) True @@ -37,11 +37,11 @@ def valid_coloring( def util_color( graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int ) -> bool: - """ + """ Pseudo-Code Base Case: - 1. Check if coloring is complete + 1. Check if coloring is complete 1.1 If complete return True (meaning that we successfully colored graph) Recursive Step: @@ -60,7 +60,7 @@ def util_color( >>> max_colors = 3 >>> colored_vertices = [0, 1, 0, 0, 0] >>> index = 3 - + >>> util_color(graph, max_colors, colored_vertices, index) True @@ -87,11 +87,11 @@ def util_color( def color(graph: List[List[int]], max_colors: int) -> List[int]: - """ + """ Wrapper function to call subroutine called util_color which will either return True or False. If True is returned colored_vertices list is filled with correct colorings - + >>> graph = [[0, 1, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 1, 0, 1, 0], diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index e4f2c62d2341..bc85e36b583f 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -1,9 +1,9 @@ """ - A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle + A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle through a graph that visits each node exactly once. - Determining whether such paths and cycles exist in graphs + Determining whether such paths and cycles exist in graphs is the 'Hamiltonian path problem', which is NP-complete. - + Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path """ from typing import List @@ -18,7 +18,7 @@ def valid_connection( 2. Next vertex should not be in path If both validations succeeds we return true saying that it is possible to connect this vertices either we return false - + Case 1:Use exact graph as in main function, with initialized values >>> graph = [[0, 1, 0, 1, 0], ... [1, 0, 1, 1, 1], @@ -56,11 +56,11 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int) Recursive Step: 2. Iterate over each vertex Check if next vertex is valid for transiting from current vertex - 2.1 Remember next vertex as next transition + 2.1 Remember next vertex as next transition 2.2 Do recursive call and check if going to this vertex solves problem 2.3 if next vertex leads to solution return True 2.4 else backtrack, delete remembered vertex - + Case 1: Use exact graph as in main function, with initialized values >>> graph = [[0, 1, 0, 1, 0], ... [1, 0, 1, 1, 1], @@ -111,12 +111,12 @@ def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]: Wrapper function to call subroutine called util_hamilton_cycle, which will either return array of vertices indicating hamiltonian cycle or an empty list indicating that hamiltonian cycle was not found. 
- Case 1: - Following graph consists of 5 edges. + Case 1: + Following graph consists of 5 edges. If we look closely, we can see that there are multiple Hamiltonian cycles. - For example one result is when we iterate like: + For example one result is when we iterate like: (0)->(1)->(2)->(4)->(3)->(0) - + (0)---(1)---(2) | / \ | | / \ | @@ -130,10 +130,10 @@ def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]: ... [0, 1, 1, 1, 0]] >>> hamilton_cycle(graph) [0, 1, 2, 4, 3, 0] - - Case 2: + + Case 2: Same Graph as it was in Case 1, changed starting index from default to 3 - + (0)---(1)---(2) | / \ | | / \ | @@ -147,11 +147,11 @@ def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]: ... [0, 1, 1, 1, 0]] >>> hamilton_cycle(graph, 3) [3, 0, 1, 2, 4, 3] - + Case 3: Following Graph is exactly what it was before, but edge 3-4 is removed. Result is that there is no Hamiltonian Cycle anymore. - + (0)---(1)---(2) | / \ | | / \ | diff --git a/backtracking/minimax.py b/backtracking/minimax.py index af07b8d8171a..4cec0e403ddf 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -1,10 +1,10 @@ import math """ Minimax helps to achieve maximum score in a game by checking all possible moves - depth is current depth in game tree. + depth is current depth in game tree. nodeIndex is index of current node in scores[]. if move is of maximizer return true else false - leaves of game tree is stored in scores[] + leaves of game tree is stored in scores[] height is maximum height of Game tree """ diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 58d9c4279a35..5d95c0970121 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -1,9 +1,9 @@ """ - The nqueens problem is of placing N queens on a N * N + The nqueens problem is of placing N queens on a N * N chess board such that no queen can attack any other queens placed on that chess board. - This means that one queen cannot have any other queen on its horizontal, vertical and + This means that one queen cannot have any other queen on its horizontal, vertical and diagonal lines. """ @@ -12,7 +12,7 @@ def isSafe(board, row, column): """ - This function returns a boolean value True if it is safe to place a queen there considering + This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. Parameters : @@ -40,13 +40,13 @@ def isSafe(board, row, column): def solve(board, row): """ - It creates a state space tree and calls the safe function until it receives a - False Boolean and terminates that branch and backtracks to the next + It creates a state space tree and calls the safe function until it receives a + False Boolean and terminates that branch and backtracks to the next possible solution branch. """ if row >= len(board): """ - If the row number exceeds N we have board with a successful combination + If the row number exceeds N we have board with a successful combination and that combination is appended to the solution list and the board is printed. """ @@ -56,9 +56,9 @@ def solve(board, row): return for i in range(len(board)): """ - For every row it iterates through each column to check if it is feasible to place a + For every row it iterates through each column to check if it is feasible to place a queen there. 
- If all the combinations for that particular branch are successful the board is + If all the combinations for that particular branch are successful the board is reinitialized for the next possible combination. """ if isSafe(board, row, i): diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 3c37631c7b35..4036f9bdc43a 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -1,9 +1,12 @@ +#!/usr/bin/env python3 + + def decrypt_caesar_with_chi_squared( ciphertext: str, cipher_alphabet=None, frequencies_dict=None, case_sensetive: bool = False, -) -> list: +) -> tuple: """ Basic Usage =========== @@ -96,15 +99,19 @@ def decrypt_caesar_with_chi_squared( Further Reading ================ - * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-statistic/ + * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared- + statistic/ * https://en.wikipedia.org/wiki/Letter_frequency * https://en.wikipedia.org/wiki/Chi-squared_test * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== - >>> decrypt_caesar_with_chi_squared('dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!') - (7, 3129.228005747531, 'why is the caesar cipher so popular? it is too easy to crack!') + >>> decrypt_caesar_with_chi_squared( + ... 'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!' + ... ) # doctest: +NORMALIZE_WHITESPACE + (7, 3129.228005747531, + 'why is the caesar cipher so popular? it is too easy to crack!') >>> decrypt_caesar_with_chi_squared('crybd cdbsxq') (10, 233.35343938980898, 'short string') @@ -172,7 +179,7 @@ def decrypt_caesar_with_chi_squared( # Append the character if it isn't in the alphabet decrypted_with_shift += letter - chi_squared_statistic = 0 + chi_squared_statistic = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: @@ -181,7 +188,8 @@ def decrypt_caesar_with_chi_squared( # Get the amount of times the letter occurs in the message occurrences = decrypted_with_shift.count(letter) - # Get the excepcted amount of times the letter should appear based on letter frequencies + # Get the excepcted amount of times the letter should appear based + # on letter frequencies expected = frequencies[letter] * occurrences # Complete the chi squared statistic formula @@ -194,7 +202,8 @@ def decrypt_caesar_with_chi_squared( # Get the amount of times the letter occurs in the message occurrences = decrypted_with_shift.count(letter) - # Get the excepcted amount of times the letter should appear based on letter frequencies + # Get the excepcted amount of times the letter should appear based + # on letter frequencies expected = frequencies[letter] * occurrences # Complete the chi squared statistic formula @@ -209,7 +218,8 @@ def decrypt_caesar_with_chi_squared( decrypted_with_shift, ] - # Get the most likely cipher by finding the cipher with the smallest chi squared statistic + # Get the most likely cipher by finding the cipher with the smallest chi squared + # statistic most_likely_cipher = min( chi_squared_statistic_values, key=chi_squared_statistic_values.get ) diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index cc6b297f2daf..bade678ad201 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -1,7 +1,9 @@ import os import random import sys -import rabin_miller as rabinMiller, cryptomath_module as cryptoMath + 
+import cryptomath_module as cryptoMath +import rabin_miller as rabinMiller min_primitive_root = 3 diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index a546e4c781e6..6c5d6dc1d210 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -25,7 +25,7 @@ def mixed_keyword(key="college", pt="UNIVERSITY"): for i in key: if i not in temp: temp.append(i) - l = len(temp) + len_temp = len(temp) # print(temp) alpha = [] modalpha = [] @@ -40,17 +40,17 @@ def mixed_keyword(key="college", pt="UNIVERSITY"): k = 0 for i in range(r): t = [] - for j in range(l): + for j in range(len_temp): t.append(temp[k]) if not (k < 25): break k += 1 modalpha.append(t) # print(modalpha) - d = dict() + d = {} j = 0 k = 0 - for j in range(l): + for j in range(len_temp): for i in modalpha: if not (len(i) - 1 >= j): break diff --git a/ciphers/simple_substitution_cipher.py b/ciphers/simple_substitution_cipher.py index 7da18482db8c..4c6d58ceca46 100644 --- a/ciphers/simple_substitution_cipher.py +++ b/ciphers/simple_substitution_cipher.py @@ -1,4 +1,5 @@ -import sys, random +import random +import sys LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py index 775df354e117..71e7c4608fdd 100644 --- a/ciphers/transposition_cipher_encrypt_decrypt_file.py +++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py @@ -1,4 +1,7 @@ -import time, os, sys +import os +import sys +import time + import transposition_cipher as transCipher diff --git a/conversions/decimal_to_octal.py b/conversions/decimal_to_octal.py index a89a2be982b8..5341ca3569bb 100644 --- a/conversions/decimal_to_octal.py +++ b/conversions/decimal_to_octal.py @@ -8,7 +8,7 @@ def decimal_to_octal(num: int) -> str: """Convert a Decimal Number to an Octal Number. - + >>> all(decimal_to_octal(i) == oct(i) for i in (0, 2, 8, 64, 65, 216, 255, 256, 512)) True """ diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 2df747c105ad..cb043cf188b7 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -89,8 +89,8 @@ def leftrotation(node): Bl Br UB Br C / UB - - UB = unbalanced node + + UB = unbalanced node """ print("left rotation node:", node.getdata()) ret = node.getleft() @@ -120,11 +120,11 @@ def rightrotation(node): def rlrotation(node): r""" - A A Br + A A Br / \ / \ / \ B C RR Br C LR B A / \ --> / \ --> / / \ - Bl Br B UB Bl UB C + Bl Br B UB Bl UB C \ / UB Bl RR = rightrotation LR = leftrotation @@ -276,13 +276,13 @@ def test(self): if __name__ == "__main__": t = AVLtree() t.traversale() - l = list(range(10)) - random.shuffle(l) - for i in l: + lst = list(range(10)) + random.shuffle(lst) + for i in lst: t.insert(i) t.traversale() - random.shuffle(l) - for i in l: + random.shuffle(lst) + for i in lst: t.del_node(i) t.traversale() diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 3ed34fc6c68e..9b6e25d5ec56 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -1,4 +1,9 @@ -class Node: # This is the Class Node with a constructor that contains data variable to type data and left, right pointers. +class Node: + """ + This is the Class Node with a constructor that contains data variable to type data + and left, right pointers. 
+ """ + def __init__(self, data): self.data = data self.left = None diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py index acd551b41b96..461996b87c26 100644 --- a/data_structures/binary_tree/lazy_segment_tree.py +++ b/data_structures/binary_tree/lazy_segment_tree.py @@ -16,8 +16,8 @@ def left(self, idx): def right(self, idx): return idx * 2 + 1 - def build(self, idx, l, r, A): - if l == r: + def build(self, idx, l, r, A): # noqa: E741 + if l == r: # noqa: E741 self.st[idx] = A[l - 1] else: mid = (l + r) // 2 @@ -25,14 +25,16 @@ def build(self, idx, l, r, A): self.build(self.right(idx), mid + 1, r, A) self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) - # update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N) for each update) - def update( - self, idx, l, r, a, b, val - ): # update(1, 1, N, a, b, v) for update val v to [a,b] - if self.flag[idx] == True: + # update with O(lg N) (Normal segment tree without lazy update will take O(Nlg N) + # for each update) + def update(self, idx, l, r, a, b, val): # noqa: E741 + """ + update(1, 1, N, a, b, v) for update val v to [a,b] + """ + if self.flag[idx] is True: self.st[idx] = self.lazy[idx] self.flag[idx] = False - if l != r: + if l != r: # noqa: E741 self.lazy[self.left(idx)] = self.lazy[idx] self.lazy[self.right(idx)] = self.lazy[idx] self.flag[self.left(idx)] = True @@ -40,9 +42,9 @@ def update( if r < a or l > b: return True - if l >= a and r <= b: + if l >= a and r <= b: # noqa: E741 self.st[idx] = val - if l != r: + if l != r: # noqa: E741 self.lazy[self.left(idx)] = val self.lazy[self.right(idx)] = val self.flag[self.left(idx)] = True @@ -55,18 +57,21 @@ def update( return True # query with O(lg N) - def query(self, idx, l, r, a, b): # query(1, 1, N, a, b) for query max of [a,b] - if self.flag[idx] == True: + def query(self, idx, l, r, a, b): # noqa: E741 + """ + query(1, 1, N, a, b) for query max of [a,b] + """ + if self.flag[idx] is True: self.st[idx] = self.lazy[idx] self.flag[idx] = False - if l != r: + if l != r: # noqa: E741 self.lazy[self.left(idx)] = self.lazy[idx] self.lazy[self.right(idx)] = self.lazy[idx] self.flag[self.left(idx)] = True self.flag[self.right(idx)] = True if r < a or l > b: return -math.inf - if l >= a and r <= b: + if l >= a and r <= b: # noqa: E741 return self.st[idx] mid = (l + r) // 2 q1 = self.query(self.left(idx), l, mid, a, b) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 877ee45b5baa..97851af937d9 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -1,6 +1,7 @@ """ A non-recursive Segment Tree implementation with range query and single element update, -works virtually with any list of the same type of elements with a "commutative" combiner. +works virtually with any list of the same type of elements with a "commutative" +combiner. Explanation: https://www.geeksforgeeks.org/iterative-segment-tree-range-minimum-query/ @@ -22,7 +23,8 @@ >>> st.update(4, 1) >>> st.query(3, 4) 0 ->>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i in range(len(a))]) +>>> st = SegmentTree([[1, 2, 3], [3, 2, 1], [1, 1, 1]], lambda a, b: [a[i] + b[i] for i +... 
in range(len(a))]) >>> st.query(0, 1) [4, 4, 4] >>> st.query(1, 2) @@ -47,7 +49,8 @@ def __init__(self, arr: List[T], fnc: Callable[[T, T], T]) -> None: >>> SegmentTree(['a', 'b', 'c'], lambda a, b: '{}{}'.format(a, b)).query(0, 2) 'abc' - >>> SegmentTree([(1, 2), (2, 3), (3, 4)], lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2) + >>> SegmentTree([(1, 2), (2, 3), (3, 4)], + ... lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2) (6, 9) """ self.N = len(arr) @@ -78,7 +81,7 @@ def update(self, p: int, v: T) -> None: p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T: + def query(self, l: int, r: int) -> T: # noqa: E741 """ Get range query value in log(N) time :param l: left element index @@ -95,9 +98,9 @@ def query(self, l: int, r: int) -> T: >>> st.query(2, 3) 7 """ - l, r = l + self.N, r + self.N + l, r = l + self.N, r + self.N # noqa: E741 res = None - while l <= r: + while l <= r: # noqa: E741 if l % 2 == 1: res = self.st[l] if res is None else self.fn(res, self.st[l]) if r % 2 == 0: diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index ad9476b4514b..10451ae68bb2 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -15,8 +15,8 @@ def left(self, idx): def right(self, idx): return idx * 2 + 1 - def build(self, idx, l, r): - if l == r: + def build(self, idx, l, r): # noqa: E741 + if l == r: # noqa: E741 self.st[idx] = A[l] else: mid = (l + r) // 2 @@ -27,12 +27,13 @@ def build(self, idx, l, r): def update(self, a, b, val): return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive( - self, idx, l, r, a, b, val - ): # update(1, 1, N, a, b, v) for update val v to [a,b] + def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 + """ + update(1, 1, N, a, b, v) for update val v to [a,b] + """ if r < a or l > b: return True - if l == r: + if l == r: # noqa: E741 self.st[idx] = val return True mid = (l + r) // 2 @@ -44,12 +45,13 @@ def update_recursive( def query(self, a, b): return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) - def query_recursive( - self, idx, l, r, a, b - ): # query(1, 1, N, a, b) for query max of [a,b] + def query_recursive(self, idx, l, r, a, b): # noqa: E741 + """ + query(1, 1, N, a, b) for query max of [a,b] + """ if r < a or l > b: return -math.inf - if l >= a and r <= b: + if l >= a and r <= b: # noqa: E741 return self.st[idx] mid = (l + r) // 2 q1 = self.query_recursive(self.left(idx), l, mid, a, b) diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index 26f021445ca4..52b757d584c3 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -1,3 +1,5 @@ +# flake8: noqa + from random import random from typing import Tuple @@ -161,7 +163,8 @@ def main(): """After each command, program prints treap""" root = None print( - "enter numbers to create a tree, + value to add value into treap, - value to erase all nodes with value. 'q' to quit. " + "enter numbers to create a tree, + value to add value into treap, " + "- value to erase all nodes with value. 'q' to quit. 
" ) args = input() diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py index 0dd84a5d987c..668ddaa85048 100644 --- a/data_structures/hashing/quadratic_probing.py +++ b/data_structures/hashing/quadratic_probing.py @@ -5,7 +5,7 @@ class QuadraticProbing(HashTable): """ - Basic Hash Table example with open addressing using Quadratic Probing + Basic Hash Table example with open addressing using Quadratic Probing """ def __init__(self, *args, **kwargs): diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index ac244023082a..334b444eaaff 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -1,3 +1,5 @@ +# flake8: noqa + """ Binomial Heap Reference: Advanced Data Structures, Peter Brass diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index e68853837faa..5b96319197ec 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,7 +66,7 @@ def build_heap(self, array): # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) + l = self.get_left_child_idx(idx) # noqa: E741 r = self.get_right_child_idx(idx) smallest = idx @@ -132,7 +132,7 @@ def decrease_key(self, node, newValue): self.sift_up(self.idx_of_element[node]) -## USAGE +# USAGE r = Node("R", -1) b = Node("B", 6) diff --git a/data_structures/linked_list/deque_doubly.py b/data_structures/linked_list/deque_doubly.py index b2e73a8f789b..7025a7ea22f9 100644 --- a/data_structures/linked_list/deque_doubly.py +++ b/data_structures/linked_list/deque_doubly.py @@ -1,5 +1,5 @@ """ -Implementing Deque using DoublyLinkedList ... +Implementing Deque using DoublyLinkedList ... Operations: 1. insertion in the front -> O(1) 2. 
insertion in the end -> O(1) @@ -61,7 +61,7 @@ def _delete(self, node): class LinkedDeque(_DoublyLinkedBase): def first(self): - """ return first element + """ return first element >>> d = LinkedDeque() >>> d.add_first('A').first() 'A' @@ -84,7 +84,7 @@ def last(self): raise Exception("List is empty") return self._trailer._prev._data - ### DEque Insert Operations (At the front, At the end) ### + # DEque Insert Operations (At the front, At the end) def add_first(self, element): """ insertion in the front @@ -100,7 +100,7 @@ def add_last(self, element): """ return self._insert(self._trailer._prev, element, self._trailer) - ### DEqueu Remove Operations (At the front, At the end) ### + # DEqueu Remove Operations (At the front, At the end) def remove_first(self): """ removal from the front diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py index b845d2f19c20..185c4ccbbb0a 100644 --- a/data_structures/linked_list/middle_element_of_linked_list.py +++ b/data_structures/linked_list/middle_element_of_linked_list.py @@ -43,7 +43,7 @@ def middle_element(self) -> int: -20 >>> link.middle_element() 12 - >>> + >>> """ slow_pointer = self.head fast_pointer = self.head diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py index 4cee0ba380b0..574acac71c43 100644 --- a/data_structures/stacks/postfix_evaluation.py +++ b/data_structures/stacks/postfix_evaluation.py @@ -22,7 +22,7 @@ def Solve(Postfix): Stack = [] - Div = lambda x, y: int(x / y) # integer division operation + Div = lambda x, y: int(x / y) # noqa: E731 integer division operation Opr = { "^": op.pow, "*": op.mul, @@ -38,29 +38,27 @@ def Solve(Postfix): for x in Postfix: if x.isdigit(): # if x in digit Stack.append(x) # append x to stack - print( - x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | " - ) # output in tabular format + # output in tabular format + print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ") else: B = Stack.pop() # pop stack - print( - "".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | " - ) # output in tabular format + # output in tabular format + print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ") A = Stack.pop() # pop stack - print( - "".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | " - ) # output in tabular format + # output in tabular format + print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ") Stack.append( str(Opr[x](int(A), int(B))) ) # evaluate the 2 values popped from stack & push result to stack + # output in tabular format print( x.rjust(8), ("push(" + A + x + B + ")").ljust(12), ",".join(Stack), sep=" | ", - ) # output in tabular format + ) return int(Stack[0]) diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py index 5a560b97c293..6947d97fc642 100644 --- a/data_structures/trie/trie.py +++ b/data_structures/trie/trie.py @@ -1,7 +1,7 @@ """ A Trie/Prefix Tree is a kind of search tree used to provide quick lookup of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity -making it impractical in practice. It however provides O(max(search_string, length of longest word)) +making it impractical in practice. It however provides O(max(search_string, length of longest word)) lookup time making it an optimal approach when space is not an issue. 
""" diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 54a243bd255a..0de21f4c009b 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -54,9 +54,9 @@ def process(self) -> None: current_error = greyscale + self.error_table[x][y] - 255 """ Burkes error propagation (`*` is current pixel): - - * 8/32 4/32 - 2/32 4/32 8/32 4/32 2/32 + + * 8/32 4/32 + 2/32 4/32 8/32 4/32 2/32 """ self.error_table[y][x + 1] += int(8 / 32 * current_error) self.error_table[y][x + 2] += int(4 / 32 * current_error) diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index 6f98fee6308e..6ee3ac5a22ea 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -29,8 +29,8 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): dst = np.zeros((image_row, image_col)) """ - Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels - in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. + Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels + in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. """ for row in range(1, image_row - 1): for col in range(1, image_col - 1): diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index a34e51e56310..d55815a6e15e 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -6,26 +6,28 @@ # Imports import numpy as np + # Class implemented to calculus the index class IndexCalculation: """ # Class Summary - This algorithm consists in calculating vegetation indices, these indices - can be used for precision agriculture for example (or remote sensing). There are - functions to define the data and to calculate the implemented indices. + This algorithm consists in calculating vegetation indices, these + indices can be used for precision agriculture for example (or remote + sensing). There are functions to define the data and to calculate the + implemented indices. 
# Vegetation index https://en.wikipedia.org/wiki/Vegetation_Index - A Vegetation Index (VI) is a spectral transformation of two or more bands designed - to enhance the contribution of vegetation properties and allow reliable spatial and - temporal inter-comparisons of terrestrial photosynthetic activity and canopy - structural variations - + A Vegetation Index (VI) is a spectral transformation of two or more bands + designed to enhance the contribution of vegetation properties and allow + reliable spatial and temporal inter-comparisons of terrestrial + photosynthetic activity and canopy structural variations + # Information about channels (Wavelength range for each) * nir - near-infrared https://www.malvernpanalytical.com/br/products/technology/near-infrared-spectroscopy Wavelength Range 700 nm to 2500 nm - * Red Edge + * Red Edge https://en.wikipedia.org/wiki/Red_edge Wavelength Range 680 nm to 730 nm * red @@ -38,7 +40,7 @@ class IndexCalculation: https://en.wikipedia.org/wiki/Color Wavelength Range 520 nm to 560 nm - + # Implemented index list #"abbreviationOfIndexName" -- list of channels used @@ -84,17 +86,19 @@ class IndexCalculation: #"NDRE" -- redEdge, nir #list of all index implemented - #allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI", "GNDVI", - "GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI", "BWDRVI", "CIgreen", - "CIrededge", "CI", "CTVI", "GDVI", "EVI", "GEMI", "GOSAVI", "GSAVI", - "Hue", "IVI", "IPVI", "I", "RVI", "MRVI", "MSAVI", "NormG", "NormNIR", - "NormR", "NGRDI", "RI", "S", "IF", "DVI", "TVI", "NDRE"] + #allIndex = ["ARVI2", "CCCI", "CVI", "GLI", "NDVI", "BNDVI", "redEdgeNDVI", + "GNDVI", "GBNDVI", "GRNDVI", "RBNDVI", "PNDVI", "ATSAVI", + "BWDRVI", "CIgreen", "CIrededge", "CI", "CTVI", "GDVI", "EVI", + "GEMI", "GOSAVI", "GSAVI", "Hue", "IVI", "IPVI", "I", "RVI", + "MRVI", "MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI", + "S", "IF", "DVI", "TVI", "NDRE"] #list of index with not blue channel - #notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI", "GRNDVI", - "ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI", "GEMI", "GOSAVI", - "GSAVI", "IVI", "IPVI", "RVI", "MRVI", "MSAVI", "NormG", "NormNIR", - "NormR", "NGRDI", "RI", "DVI", "TVI", "NDRE"] + #notBlueIndex = ["ARVI2", "CCCI", "CVI", "NDVI", "redEdgeNDVI", "GNDVI", + "GRNDVI", "ATSAVI", "CIgreen", "CIrededge", "CTVI", "GDVI", + "GEMI", "GOSAVI", "GSAVI", "IVI", "IPVI", "RVI", "MRVI", + "MSAVI", "NormG", "NormNIR", "NormR", "NGRDI", "RI", "DVI", + "TVI", "NDRE"] #list of index just with RGB channels #RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"] @@ -121,8 +125,8 @@ def calculation( self, index="", red=None, green=None, blue=None, redEdge=None, nir=None ): """ - performs the calculation of the index with the values instantiated in the class - :str index: abbreviation of index name to perform + performs the calculation of the index with the values instantiated in the class + :str index: abbreviation of index name to perform """ self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir) funcs = { @@ -213,8 +217,8 @@ def GLI(self): def NDVI(self): """ - Normalized Difference self.nir/self.red Normalized Difference Vegetation Index, - Calibrated NDVI - CDVI + Normalized Difference self.nir/self.red Normalized Difference Vegetation + Index, Calibrated NDVI - CDVI https://www.indexdatabase.de/db/i-single.php?id=58 :return: index """ @@ -222,7 +226,7 @@ def NDVI(self): def BNDVI(self): """ - Normalized Difference self.nir/self.blue 
self.blue-normalized difference + Normalized Difference self.nir/self.blue self.blue-normalized difference vegetation index https://www.indexdatabase.de/db/i-single.php?id=135 :return: index @@ -410,7 +414,7 @@ def IPVI(self): """ return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1) - def I(self): + def I(self): # noqa: E741,E743 """ Intensity https://www.indexdatabase.de/db/i-single.php?id=36 @@ -471,8 +475,9 @@ def NormR(self): def NGRDI(self): """ - Normalized Difference self.green/self.red Normalized self.green self.red - difference index, Visible Atmospherically Resistant Indices self.green (VIself.green) + Normalized Difference self.green/self.red Normalized self.green self.red + difference index, Visible Atmospherically Resistant Indices self.green + (VIself.green) https://www.indexdatabase.de/db/i-single.php?id=390 :return: index """ @@ -506,7 +511,7 @@ def IF(self): def DVI(self): """ - Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index + Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index Number (VIN) https://www.indexdatabase.de/db/i-single.php?id=12 :return: index @@ -535,7 +540,7 @@ def NDRE(self): # Examples of how to use the class -# instantiating the class +# instantiating the class cl = IndexCalculation() # instantiating the class with the values @@ -556,9 +561,12 @@ def NDRE(self): indexValue_form3 = cl.calculation("CCCI", red=red, green=green, blue=blue, redEdge=redEdge, nir=nir).astype(np.float64) -print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ', floatmode='maxprec_equal')) -print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ', floatmode='maxprec_equal')) -print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ', floatmode='maxprec_equal')) +print("Form 1: "+np.array2string(indexValue_form1, precision=20, separator=', ', + floatmode='maxprec_equal')) +print("Form 2: "+np.array2string(indexValue_form2, precision=20, separator=', ', + floatmode='maxprec_equal')) +print("Form 3: "+np.array2string(indexValue_form3, precision=20, separator=', ', + floatmode='maxprec_equal')) # A list of examples results for different type of data at NDVI # float16 -> 0.31567383 #NDVI (red = 50, nir = 100) diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py index 9e81c83649a6..03bf9d25cb8a 100644 --- a/divide_and_conquer/max_subarray_sum.py +++ b/divide_and_conquer/max_subarray_sum.py @@ -1,10 +1,10 @@ -""" -Given a array of length n, max_subarray_sum() finds +""" +Given a array of length n, max_subarray_sum() finds the maximum of sum of contiguous sub-array using divide and conquer method. 
Time complexity : O(n log n) -Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION +Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION (section : 4, sub-section : 4.1, page : 70) """ @@ -13,10 +13,10 @@ def max_sum_from_start(array): """ This function finds the maximum contiguous sum of array from 0 index - Parameters : + Parameters : array (list[int]) : given array - - Returns : + + Returns : max_sum (int) : maximum contiguous sum of array from 0 index """ @@ -32,10 +32,10 @@ def max_sum_from_start(array): def max_cross_array_sum(array, left, mid, right): """ This function finds the maximum contiguous sum of left and right arrays - Parameters : - array, left, mid, right (list[int], int, int, int) - - Returns : + Parameters : + array, left, mid, right (list[int], int, int, int) + + Returns : (int) : maximum of sum of contiguous sum of left and right arrays """ @@ -48,11 +48,11 @@ def max_cross_array_sum(array, left, mid, right): def max_subarray_sum(array, left, right): """ Maximum contiguous sub-array sum, using divide and conquer method - Parameters : - array, left, right (list[int], int, int) : + Parameters : + array, left, right (list[int], int, int) : given array, current left index and current right index - - Returns : + + Returns : int : maximum of sum of contiguous sub-array """ diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py index d6693eb36a0a..328e3dca316f 100644 --- a/divide_and_conquer/mergesort.py +++ b/divide_and_conquer/mergesort.py @@ -1,5 +1,5 @@ def merge(a, b, m, e): - l = a[b : m + 1] + l = a[b : m + 1] # noqa: E741 r = a[m + 1 : e + 1] k = b i = 0 diff --git a/dynamic_programming/factorial.py b/dynamic_programming/factorial.py index 0269014e7a18..546478441f31 100644 --- a/dynamic_programming/factorial.py +++ b/dynamic_programming/factorial.py @@ -26,9 +26,9 @@ def factorial(num): # factorial of num # uncomment the following to see how recalculations are avoided -##result=[-1]*10 -##result[0]=result[1]=1 -##print(factorial(5)) +# result=[-1]*10 +# result[0]=result[1]=1 +# print(factorial(5)) # print(factorial(3)) # print(factorial(7)) diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index edeacc3124fa..28c4ded66495 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -1,5 +1,5 @@ """ -Author : Syed Faizan (3rd Year Student IIIT Pune) +Author : Syed Faizan (3rd Year Student IIIT Pune) github : faizan2700 You are given a bitmask m and you want to efficiently iterate through all of its submasks. The mask s is submask of m if only bits that were included in @@ -33,7 +33,7 @@ def list_of_submasks(mask: int) -> List[int]: Traceback (most recent call last): ... 
AssertionError: mask needs to be positive integer, your input 0 - + """ fmt = "mask needs to be positive integer, your input {}" diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index a7206b221d96..b319421b9aa2 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -76,7 +76,7 @@ def longest_common_subsequence(x: str, y: str): expected_subseq = "GTAB" ln, subseq = longest_common_subsequence(a, b) - ## print("len =", ln, ", sub-sequence =", subseq) + print("len =", ln, ", sub-sequence =", subseq) import doctest doctest.testmod() diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 81b7f8f8ff17..48d5e8e8fade 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -1,11 +1,14 @@ """ Author : Mehdi ALAOUI -This is a pure Python implementation of Dynamic Programming solution to the longest increasing subsequence of a given sequence. +This is a pure Python implementation of Dynamic Programming solution to the longest +increasing subsequence of a given sequence. The problem is : -Given an array, to find the longest and increasing sub-array in that given array and return it. -Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return [10, 22, 33, 41, 60, 80] as output +Given an array, to find the longest and increasing sub-array in that given array and +return it. +Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return + [10, 22, 33, 41, 60, 80] as output """ from typing import List @@ -21,11 +24,13 @@ def longest_subsequence(array: List[int]) -> List[int]: # This function is recu [8] >>> longest_subsequence([1, 1, 1]) [1, 1, 1] + >>> longest_subsequence([]) + [] """ array_length = len(array) - if ( - array_length <= 1 - ): # If the array contains only one element, we return it (it's the stop condition of recursion) + # If the array contains only one element, we return it (it's the stop condition of + # recursion) + if array_length <= 1: return array # Else pivot = array[0] diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py index 46790a5a8d41..b33774057db3 100644 --- a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py +++ b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py @@ -1,19 +1,19 @@ ############################# # Author: Aravind Kashyap # File: lis.py -# comments: This programme outputs the Longest Strictly Increasing Subsequence in O(NLogN) -# Where N is the Number of elements in the list +# comments: This programme outputs the Longest Strictly Increasing Subsequence in +# O(NLogN) Where N is the Number of elements in the list ############################# from typing import List -def CeilIndex(v, l, r, key): +def CeilIndex(v, l, r, key): # noqa: E741 while r - l > 1: m = (l + r) // 2 if v[m] >= key: r = m else: - l = m + l = m # noqa: E741 return r @@ -23,7 +23,8 @@ def LongestIncreasingSubsequenceLength(v: List[int]) -> int: 6 >>> LongestIncreasingSubsequenceLength([]) 0 - >>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]) + >>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, + ... 
11, 7, 15]) 6 >>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1]) 1 diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py index 7350eaf373cb..284edb5841e4 100644 --- a/dynamic_programming/max_sub_array.py +++ b/dynamic_programming/max_sub_array.py @@ -44,12 +44,12 @@ def max_sub_array(nums: List[int]) -> int: >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) 6 - + An empty (sub)array has sum 0. >>> max_sub_array([]) 0 - - If all elements are negative, the largest subarray would be the empty array, + + If all elements are negative, the largest subarray would be the empty array, having the sum 0. >>> max_sub_array([-1, -2, -3]) 0 diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index d5750326fea4..8fad4ef3072f 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -23,7 +23,7 @@ def findMin(arr): dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2), -1, -1): - if dp[n][j] == True: + if dp[n][j] is True: diff = s - 2 * j break diff --git a/dynamic_programming/optimal_binary_search_tree.py b/dynamic_programming/optimal_binary_search_tree.py index f33ca01bd933..e6f93f85ef0f 100644 --- a/dynamic_programming/optimal_binary_search_tree.py +++ b/dynamic_programming/optimal_binary_search_tree.py @@ -40,7 +40,7 @@ def __str__(self): def print_binary_search_tree(root, key, i, j, parent, is_left): """ Recursive function to print a BST from a root table. - + >>> key = [3, 8, 9, 10, 17, 21] >>> root = [[0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 3], [0, 0, 2, 3, 3, 3], \ [0, 0, 0, 3, 3, 3], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 5]] @@ -73,7 +73,7 @@ def find_optimal_binary_search_tree(nodes): The dynamic programming algorithm below runs in O(n^2) time. Implemented from CLRS (Introduction to Algorithms) book. https://en.wikipedia.org/wiki/Introduction_to_Algorithms - + >>> find_optimal_binary_search_tree([Node(12, 8), Node(10, 34), Node(20, 50), \ Node(42, 3), Node(25, 40), Node(37, 30)]) Binary search tree nodes: @@ -104,14 +104,15 @@ def find_optimal_binary_search_tree(nodes): # This 2D array stores the overall tree cost (which's as minimized as possible); # for a single key, cost is equal to frequency of the key. dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] - # sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes array + # sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes + # array sum = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] # stores tree roots that will be used later for constructing binary search tree root = [[i if i == j else 0 for j in range(n)] for i in range(n)] - for l in range(2, n + 1): # l is an interval length - for i in range(n - l + 1): - j = i + l - 1 + for interval_length in range(2, n + 1): + for i in range(n - interval_length + 1): + j = i + interval_length - 1 dp[i][j] = sys.maxsize # set the value to "infinity" sum[i][j] = sum[i][j - 1] + freqs[j] diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 196b81c22045..7d99727dd900 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -1,12 +1,15 @@ # Python program to print all subset combinations of n element in given set of r element. 
-# arr[] ---> Input Array -# data[] ---> Temporary array to store current combination -# start & end ---> Staring and Ending indexes in arr[] -# index ---> Current index in data[] -# r ---> Size of a combination to be printed + + def combination_util(arr, n, r, index, data, i): - # Current combination is ready to be printed, - # print it + """ + Current combination is ready to be printed, print it + arr[] ---> Input Array + data[] ---> Temporary array to store current combination + start & end ---> Staring and Ending indexes in arr[] + index ---> Current index in data[] + r ---> Size of a combination to be printed + """ if index == r: for j in range(r): print(data[j], end=" ") diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index 224b9404a5b7..613c779a1b3e 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -15,8 +15,8 @@ def lamberts_ellipsoidal_distance( Representing the earth as an ellipsoid allows us to approximate distances between points on the surface much better than a sphere. Ellipsoidal formulas treat the Earth as an - oblate ellipsoid which means accounting for the flattening that happens at the North - and South poles. Lambert's formulae provide accuracy on the order of 10 meteres over + oblate ellipsoid which means accounting for the flattening that happens at the North + and South poles. Lambert's formulae provide accuracy on the order of 10 meteres over thousands of kilometeres. Other methods can provide millimeter-level accuracy but this is a simpler method to calculate long range distances without increasing computational intensity. diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 3ecc829946e8..7197369de090 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,5 +1,5 @@ # Finding Articulation Points in Undirected Graph -def computeAP(l): +def computeAP(l): # noqa: E741 n = len(l) outEdgeCount = 0 low = [0] * n @@ -36,12 +36,12 @@ def dfs(root, at, parent, outEdgeCount): isArt[i] = outEdgeCount > 1 for x in range(len(isArt)): - if isArt[x] == True: + if isArt[x] is True: print(x) # Adjacency list of graph -l = { +data = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], @@ -52,4 +52,4 @@ def dfs(root, at, parent, outEdgeCount): 7: [6, 8], 8: [5, 7], } -computeAP(l) +computeAP(data) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 8cdde6abc819..1cbd82a2bd08 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -1,3 +1,6 @@ +from collections import deque + + if __name__ == "__main__": # Accept No. 
of Nodes and edges n, m = map(int, input().split(" ")) @@ -72,7 +75,6 @@ def dfs(G, s): Q - Traversal Stack -------------------------------------------------------------------------------- """ -from collections import deque def bfs(G, s): @@ -125,7 +127,6 @@ def dijk(G, s): Topological Sort -------------------------------------------------------------------------------- """ -from collections import deque def topo(G, ind=None, Q=None): @@ -235,10 +236,10 @@ def prim(G, s): def edglist(): n, m = map(int, input().split(" ")) - l = [] + edges = [] for i in range(m): - l.append(map(int, input().split(" "))) - return l, n + edges.append(map(int, input().split(" "))) + return edges, n """ diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index 807e0b0fcdb9..d4d37a365e03 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -9,7 +9,7 @@ def printDist(dist, V): def BellmanFord(graph: List[Dict[str, int]], V: int, E: int, src: int) -> int: """ - Returns shortest paths from a vertex src to all + Returns shortest paths from a vertex src to all other vertices. """ mdist = [float("inf") for i in range(V)] diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index 514aed6d7211..e556d7966fa3 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -1,6 +1,8 @@ -"""Breath First Search (BFS) can be used when finding the shortest path +"""Breath First Search (BFS) can be used when finding the shortest path from a given source node to a target node in an unweighted graph. """ +from typing import Dict + graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], @@ -11,8 +13,6 @@ "G": ["C"], } -from typing import Dict - class Graph: def __init__(self, graph: Dict[str, str], source_vertex: str) -> None: @@ -46,8 +46,9 @@ def breath_first_search(self) -> None: def shortest_path(self, target_vertex: str) -> str: """This shortest path function returns a string, describing the result: 1.) No path is found. The string is a human readable message to indicate this. - 2.) The shortest path is found. The string is in the form `v1(->v2->v3->...->vn)`, - where v1 is the source vertex and vn is the target vertex, if it exists separately. + 2.) The shortest path is found. The string is in the form + `v1(->v2->v3->...->vn)`, where v1 is the source vertex and vn is the target + vertex, if it exists separately. >>> g = Graph(graph, "G") >>> g.breath_first_search() diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index 1ec3e3d1d45f..00b771649b5d 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -1,21 +1,22 @@ # Check whether Graph is Bipartite or Not using BFS + # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. 
-def checkBipartite(l): +def checkBipartite(graph): queue = [] - visited = [False] * len(l) - color = [-1] * len(l) + visited = [False] * len(graph) + color = [-1] * len(graph) def bfs(): while queue: u = queue.pop(0) visited[u] = True - for neighbour in l[u]: + for neighbour in graph[u]: if neighbour == u: return False @@ -29,16 +30,16 @@ def bfs(): return True - for i in range(len(l)): + for i in range(len(graph)): if not visited[i]: queue.append(i) color[i] = 0 - if bfs() == False: + if bfs() is False: return False return True -# Adjacency List of graph -l = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]} -print(checkBipartite(l)) +if __name__ == "__main__": + # Adjacency List of graph + print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) diff --git a/graphs/check_bipartite_graph_dfs.py b/graphs/check_bipartite_graph_dfs.py index 6fe54a6723c5..fd644230449c 100644 --- a/graphs/check_bipartite_graph_dfs.py +++ b/graphs/check_bipartite_graph_dfs.py @@ -1,27 +1,28 @@ # Check whether Graph is Bipartite or Not using DFS + # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. -def check_bipartite_dfs(l): - visited = [False] * len(l) - color = [-1] * len(l) +def check_bipartite_dfs(graph): + visited = [False] * len(graph) + color = [-1] * len(graph) def dfs(v, c): visited[v] = True color[v] = c - for u in l[v]: + for u in graph[v]: if not visited[u]: dfs(u, 1 - c) - for i in range(len(l)): + for i in range(len(graph)): if not visited[i]: dfs(i, 0) - for i in range(len(l)): - for j in l[i]: + for i in range(len(graph)): + for j in graph[i]: if color[i] == color[j]: return False @@ -29,5 +30,5 @@ def dfs(v, c): # Adjacency list of graph -l = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} -print(check_bipartite_dfs(l)) +graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} +print(check_bipartite_dfs(graph)) diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py index 1206d5ae9252..f3e0ed4ebaa6 100644 --- a/graphs/depth_first_search.py +++ b/graphs/depth_first_search.py @@ -1,6 +1,6 @@ -"""The DFS function simply calls itself recursively for every unvisited child of -its argument. We can emulate that behaviour precisely using a stack of iterators. -Instead of recursively calling with a node, we'll push an iterator to the node's +"""The DFS function simply calls itself recursively for every unvisited child of +its argument. We can emulate that behaviour precisely using a stack of iterators. +Instead of recursively calling with a node, we'll push an iterator to the node's children onto the iterator stack. When the iterator at the top of the stack terminates, we'll pop it off the stack. @@ -21,7 +21,7 @@ def depth_first_search(graph: Dict, start: str) -> Set[int]: :param graph: directed graph in dictionary format :param vertex: starting vectex as a string :returns: the trace of the search - >>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], + >>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], ... 
"F": ["C", "E", "G"], "G": ["F"] } >>> start = "A" diff --git a/graphs/depth_first_search_2.py b/graphs/depth_first_search_2.py index 0593e120b1da..c932e76293ed 100644 --- a/graphs/depth_first_search_2.py +++ b/graphs/depth_first_search_2.py @@ -28,7 +28,7 @@ def DFS(self): # call the recursive helper function for i in range(len(self.vertex)): - if visited[i] == False: + if visited[i] is False: self.DFSRec(i, visited) def DFSRec(self, startVertex, visited): @@ -39,7 +39,7 @@ def DFSRec(self, startVertex, visited): # Recur for all the vertices that are adjacent to this node for i in self.vertex.keys(): - if visited[i] == False: + if visited[i] is False: self.DFSRec(i, visited) diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index f156602beb6e..d15fcbbfeef0 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -1,6 +1,6 @@ -"""pseudo-code""" - """ +pseudo-code + DIJKSTRA(graph G, start vertex s, destination vertex d): //all nodes initially unexplored @@ -30,7 +30,6 @@ distance between each vertex that makes up the path from start vertex to target vertex. """ - import heapq diff --git a/graphs/dinic.py b/graphs/dinic.py index 4f5e81236984..aaf3a119525c 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow): # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # l = 30 maybe faster for random data + for l in range(31): # noqa: E741 l = 30 maybe faster for random data while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index 26c87cd8f4b2..0312e982a9e0 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -71,8 +71,8 @@ def dfs(self, s=-2, d=-1): if len(stack) == 0: return visited - # c is the count of nodes you want and if you leave it or pass -1 to the function the count - # will be random from 10 to 10000 + # c is the count of nodes you want and if you leave it or pass -1 to the function + # the count will be random from 10 to 10000 def fill_graph_randomly(self, c=-1): if c == -1: c = (math.floor(rand.random() * 10000)) + 10 @@ -168,14 +168,14 @@ def cycle_nodes(self): and indirect_parents.count(__[1]) > 0 and not on_the_way_back ): - l = len(stack) - 1 - while True and l >= 0: - if stack[l] == __[1]: + len_stack = len(stack) - 1 + while True and len_stack >= 0: + if stack[len_stack] == __[1]: anticipating_nodes.add(__[1]) break else: - anticipating_nodes.add(stack[l]) - l -= 1 + anticipating_nodes.add(stack[len_stack]) + len_stack -= 1 if visited.count(__[1]) < 1: stack.append(__[1]) visited.append(__[1]) @@ -221,15 +221,15 @@ def has_cycle(self): and indirect_parents.count(__[1]) > 0 and not on_the_way_back ): - l = len(stack) - 1 - while True and l >= 0: - if stack[l] == __[1]: + len_stack_minus_one = len(stack) - 1 + while True and len_stack_minus_one >= 0: + if stack[len_stack_minus_one] == __[1]: anticipating_nodes.add(__[1]) break else: return True - anticipating_nodes.add(stack[l]) - l -= 1 + anticipating_nodes.add(stack[len_stack_minus_one]) + len_stack_minus_one -= 1 if visited.count(__[1]) < 1: stack.append(__[1]) visited.append(__[1]) @@ -341,8 +341,8 @@ def dfs(self, s=-2, d=-1): if len(stack) == 0: return visited - # c is the count of nodes you want and if you leave it or pass -1 to the 
function the count - # will be random from 10 to 10000 + # c is the count of nodes you want and if you leave it or pass -1 to the function + # the count will be random from 10 to 10000 def fill_graph_randomly(self, c=-1): if c == -1: c = (math.floor(rand.random() * 10000)) + 10 @@ -397,14 +397,14 @@ def cycle_nodes(self): and indirect_parents.count(__[1]) > 0 and not on_the_way_back ): - l = len(stack) - 1 - while True and l >= 0: - if stack[l] == __[1]: + len_stack = len(stack) - 1 + while True and len_stack >= 0: + if stack[len_stack] == __[1]: anticipating_nodes.add(__[1]) break else: - anticipating_nodes.add(stack[l]) - l -= 1 + anticipating_nodes.add(stack[len_stack]) + len_stack -= 1 if visited.count(__[1]) < 1: stack.append(__[1]) visited.append(__[1]) @@ -450,15 +450,15 @@ def has_cycle(self): and indirect_parents.count(__[1]) > 0 and not on_the_way_back ): - l = len(stack) - 1 - while True and l >= 0: - if stack[l] == __[1]: + len_stack_minus_one = len(stack) - 1 + while True and len_stack_minus_one >= 0: + if stack[len_stack_minus_one] == __[1]: anticipating_nodes.add(__[1]) break else: return True - anticipating_nodes.add(stack[l]) - l -= 1 + anticipating_nodes.add(stack[len_stack_minus_one]) + len_stack_minus_one -= 1 if visited.count(__[1]) < 1: stack.append(__[1]) visited.append(__[1]) diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index a2e5cf4da26a..7850933b0201 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -9,7 +9,7 @@ def dfs(u, graph, visited_edge, path=[]): path = path + [u] for v in graph[u]: - if visited_edge[u][v] == False: + if visited_edge[u][v] is False: visited_edge[u][v], visited_edge[v][u] = True, True path = dfs(v, graph, visited_edge, path) return path diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py index e18a3bafa9c0..6555dd7bc29e 100644 --- a/graphs/finding_bridges.py +++ b/graphs/finding_bridges.py @@ -1,7 +1,7 @@ # Finding Bridges in Undirected Graph -def computeBridges(l): +def computeBridges(graph): id = 0 - n = len(l) # No of vertices in graph + n = len(graph) # No of vertices in graph low = [0] * n visited = [False] * n @@ -9,7 +9,7 @@ def dfs(at, parent, bridges, id): visited[at] = True low[at] = id id += 1 - for to in l[at]: + for to in graph[at]: if to == parent: pass elif not visited[to]: @@ -28,7 +28,7 @@ def dfs(at, parent, bridges, id): print(bridges) -l = { +graph = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], @@ -39,4 +39,4 @@ def dfs(at, parent, bridges, id): 7: [6, 8], 8: [5, 7], } -computeBridges(l) +computeBridges(graph) diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index aa14fbdd3a3c..ff7063082267 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -19,7 +19,7 @@ ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'], ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6'] - ] +] # fmt: on diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index 0651040365d0..fed7517a21e2 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -1,10 +1,10 @@ # Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm -def longestDistance(l): - indegree = [0] * len(l) +def longestDistance(graph): + indegree = [0] * len(graph) queue 
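graphs/eulerian_path_and_circuit_for_undirected_graph.py (touched above) builds the actual path; the degree-count rule it relies on can be stated separately. A small sketch, assuming the graph is connected and given as an adjacency-list dict (the classification strings are this note's own):

def euler_type(graph):
    """Classify a connected undirected graph by its odd-degree vertex count."""
    odd = sum(1 for node in graph if len(graph[node]) % 2 == 1)
    if odd == 0:
        return "Eulerian circuit"                    # every vertex has even degree
    if odd == 2:
        return "Eulerian path"                       # exactly two odd-degree endpoints
    return "neither"

print(euler_type({0: [1, 2], 1: [0, 2], 2: [0, 1]}))   # Eulerian circuit
print(euler_type({0: [1], 1: [0, 2], 2: [1]}))          # Eulerian path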
= [] - longDist = [1] * len(l) + longDist = [1] * len(graph) - for key, values in l.items(): + for key, values in graph.items(): for i in values: indegree[i] += 1 @@ -14,7 +14,7 @@ def longestDistance(l): while queue: vertex = queue.pop(0) - for x in l[vertex]: + for x in graph[vertex]: indegree[x] -= 1 if longDist[vertex] + 1 > longDist[x]: @@ -27,5 +27,5 @@ def longestDistance(l): # Adjacency list of Graph -l = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} -longestDistance(l) +graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} +longestDistance(graph) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index d50bc9a43d19..bf9f90299361 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,11 +1,14 @@ -# Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph using BFS -def topologicalSort(l): - indegree = [0] * len(l) +def topologicalSort(graph): + """ + Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph + using BFS + """ + indegree = [0] * len(graph) queue = [] topo = [] cnt = 0 - for key, values in l.items(): + for key, values in graph.items(): for i in values: indegree[i] += 1 @@ -17,17 +20,17 @@ def topologicalSort(l): vertex = queue.pop(0) cnt += 1 topo.append(vertex) - for x in l[vertex]: + for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(x) - if cnt != len(l): + if cnt != len(graph): print("Cycle exists") else: print(topo) # Adjacency List of Graph -l = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topologicalSort(l) +graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} +topologicalSort(graph) diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 6255b6af64ad..77ff149e2a38 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -2,7 +2,7 @@ from collections import defaultdict -def PrimsAlgorithm(l): +def PrimsAlgorithm(l): # noqa: E741 nodePosition = [] @@ -109,7 +109,7 @@ def deleteMinimum(heap, positions): e = int(input("Enter number of edges: ").strip()) adjlist = defaultdict(list) for x in range(e): - l = [int(x) for x in input().strip().split()] + l = [int(x) for x in input().strip().split()] # noqa: E741 adjlist[l[0]].append([l[1], l[2]]) adjlist[l[1]].append([l[0], l[2]]) print(PrimsAlgorithm(adjlist)) diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 8d3bbd4c0251..1bdf984b68de 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -79,24 +79,23 @@ def reset(): machine_time = 0 -####################################### - -# Initialization -reset() - -# Pushing Data (Input) -import random - -message = random.sample(range(0xFFFFFFFF), 100) -for chunk in message: - push(chunk) - -# for controlling -inp = "" - -# Pulling Data (Output) -while inp in ("e", "E"): - print("%s" % format(pull(), "#04x")) - print(buffer_space) - print(params_space) - inp = input("(e)exit? ").strip() +if __name__ == "__main__": + # Initialization + reset() + + # Pushing Data (Input) + import random + + message = random.sample(range(0xFFFFFFFF), 100) + for chunk in message: + push(chunk) + + # for controlling + inp = "" + + # Pulling Data (Output) + while inp in ("e", "E"): + print("%s" % format(pull(), "#04x")) + print(buffer_space) + print(params_space) + inp = input("(e)exit? 
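graphs/kahns_algorithm_topo.py now carries its description as a docstring: repeatedly remove vertices whose indegree has dropped to zero. The same idea as a self-contained sketch, with collections.deque swapped in for the list-based queue used in the file:

from collections import deque

def topological_sort(graph):
    """Kahn's algorithm on an adjacency-list dict; returns None on a cycle."""
    indegree = {node: 0 for node in graph}
    for neighbours in graph.values():
        for v in neighbours:
            indegree[v] += 1
    queue = deque(node for node, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return order if len(order) == len(graph) else None   # None signals a cycle

print(topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))
# e.g. [0, 1, 2, 3, 4, 5]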
").strip() diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index c1ed7fe1d727..14d23ef3cef4 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -47,6 +47,7 @@ # Imports import numpy as np + # Functions of binary conversion-------------------------------------- def text_to_bits(text, encoding="utf-8", errors="surrogatepass"): """ diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index f4628f1d964a..10b9da65863f 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -27,10 +27,10 @@ class Vector: """ This class represents a vector of arbitrary size. - You need to give the vector components. - + You need to give the vector components. + Overview about the methods: - + constructor(components : list) : init the vector set(components : list) : changes the vector components. __str__() : toString method @@ -124,7 +124,7 @@ def __sub__(self, other): def __mul__(self, other): """ - mul implements the scalar multiplication + mul implements the scalar multiplication and the dot-product """ if isinstance(other, float) or isinstance(other, int): @@ -167,7 +167,7 @@ def zeroVector(dimension): def unitBasisVector(dimension, pos): """ - returns a unit basis vector with a One + returns a unit basis vector with a One at index 'pos' (indexing at 0) """ # precondition @@ -196,7 +196,7 @@ def randomVector(N, a, b): """ input: size (N) of the vector. random range (a,b) - output: returns a random vector of size N, with + output: returns a random vector of size N, with random integer components between 'a' and 'b'. """ random.seed(None) @@ -208,10 +208,10 @@ class Matrix: """ class: Matrix This class represents a arbitrary matrix. - + Overview about the methods: - - __str__() : returns a string representation + + __str__() : returns a string representation operator * : implements the matrix vector multiplication implements the matrix-scalar multiplication. changeComponent(x,y,value) : changes the specified component. diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 8d2170e46da4..21fed9529ac0 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -19,7 +19,7 @@ def test_component(self): x = Vector([1, 2, 3]) self.assertEqual(x.component(0), 1) self.assertEqual(x.component(2), 3) - y = Vector() + _ = Vector() def test_str(self): """ diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 7a4f69eb77ce..86a5dd968779 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -11,9 +11,11 @@ Inputs: - X , a 2D numpy array of features. - k , number of clusters to create. - - initial_centroids , initial centroid values generated by utility function(mentioned in usage). + - initial_centroids , initial centroid values generated by utility function(mentioned + in usage). - maxiter , maximum number of iterations to process. - - heterogeneity , empty list that will be filled with hetrogeneity values if passed to kmeans func. + - heterogeneity , empty list that will be filled with hetrogeneity values if passed + to kmeans func. Usage: 1. define 'k' value, 'X' features array and 'hetrogeneity' empty list @@ -22,7 +24,8 @@ initial_centroids = get_initial_centroids( X, k, - seed=0 # seed value for initial centroid generation, None for randomness(default=None) + seed=0 # seed value for initial centroid generation, + # None for randomness(default=None) ) 3. 
find centroids and clusters using kmeans function. @@ -37,7 +40,8 @@ ) - 4. Plot the loss function, hetrogeneity values for every iteration saved in hetrogeneity list. + 4. Plot the loss function, hetrogeneity values for every iteration saved in + hetrogeneity list. plot_heterogeneity( heterogeneity, k @@ -46,8 +50,9 @@ 5. Have fun.. """ -from sklearn.metrics import pairwise_distances import numpy as np +from matplotlib import pyplot as plt +from sklearn.metrics import pairwise_distances TAG = "K-MEANS-CLUST/ " @@ -118,9 +123,6 @@ def compute_heterogeneity(data, k, centroids, cluster_assignment): return heterogeneity -from matplotlib import pyplot as plt - - def plot_heterogeneity(heterogeneity, k): plt.figure(figsize=(7, 4)) plt.plot(heterogeneity, linewidth=4) @@ -136,9 +138,11 @@ def kmeans( ): """This function runs k-means on given data and initial set of centroids. maxiter: maximum number of iterations to run.(default=500) - record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations + record_heterogeneity: (optional) a list, to store the history of heterogeneity + as function of iterations if None, do not store the history. - verbose: if True, print how many data points changed their cluster labels in each iteration""" + verbose: if True, print how many data points changed their cluster labels in + each iteration""" centroids = initial_centroids[:] prev_cluster_assignment = None @@ -149,7 +153,8 @@ def kmeans( # 1. Make cluster assignments using nearest centroids cluster_assignment = assign_clusters(data, centroids) - # 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster. + # 2. Compute a new centroid for each of the k clusters, averaging all data + # points assigned to that cluster. centroids = revise_centroids(data, k, cluster_assignment) # Check for convergence: if none of the assignments changed, stop diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 6998db1ce4a0..01be288ea64a 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -186,7 +186,8 @@ def predict_y_values( >>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002] >>> variance = 0.9618530973487494 >>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333] - >>> predict_y_values(x_items, means, variance, probabilities) # doctest: +NORMALIZE_WHITESPACE + >>> predict_y_values(x_items, means, variance, + ... 
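The usage notes reflowed above for machine_learning/k_means_clust.py describe the two alternating steps: assign each point to its nearest centroid, then recompute each centroid as the mean of its cluster. A minimal numpy sketch of that loop on toy data (the helper names mirror the file's, but the bodies here are this note's own, and the fixed iteration count stands in for a convergence check):

import numpy as np

def assign_clusters(data, centroids):
    """Index of the nearest centroid for every row of `data`."""
    distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
    return distances.argmin(axis=1)

def revise_centroids(data, k, assignment):
    """New centroid = mean of the points assigned to each cluster."""
    return np.array([data[assignment == i].mean(axis=0) for i in range(k)])

data = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
centroids = data[:2].copy()              # naive initialisation, fine for this demo
for _ in range(5):                       # fixed iteration count instead of convergence test
    assignment = assign_clusters(data, centroids)
    centroids = revise_centroids(data, 2, assignment)

print(assignment)    # [0 0 1 1]
print(centroids)     # [[ 0.   0.5] [10.  10.5]]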
probabilities) # doctest: +NORMALIZE_WHITESPACE [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] @@ -211,7 +212,7 @@ def predict_y_values( # appending discriminant values of each item to 'results' list results.append(temp) - return [l.index(max(l)) for l in results] + return [result.index(max(result)) for result in results] # Calculating Accuracy diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py index 7b080715b762..cdcb90b8fd21 100644 --- a/machine_learning/polymonial_regression.py +++ b/machine_learning/polymonial_regression.py @@ -1,5 +1,12 @@ import matplotlib.pyplot as plt import pandas as pd +from sklearn.linear_model import LinearRegression + +# Splitting the dataset into the Training set and Test set +from sklearn.model_selection import train_test_split + +# Fitting Polynomial Regression to the dataset +from sklearn.preprocessing import PolynomialFeatures # Importing the dataset dataset = pd.read_csv( @@ -9,16 +16,9 @@ y = dataset.iloc[:, 2].values -# Splitting the dataset into the Training set and Test set -from sklearn.model_selection import train_test_split - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) -# Fitting Polynomial Regression to the dataset -from sklearn.preprocessing import PolynomialFeatures -from sklearn.linear_model import LinearRegression - poly_reg = PolynomialFeatures(degree=4) X_poly = poly_reg.fit_transform(X) pol_reg = LinearRegression() diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py index 2b891d4eb9d5..a401df139748 100755 --- a/machine_learning/scoring_functions.py +++ b/machine_learning/scoring_functions.py @@ -14,6 +14,7 @@ and types of data """ + # Mean Absolute Error def mae(predict, actual): """ diff --git a/maths/aliquot_sum.py b/maths/aliquot_sum.py index ac5fa58f41cf..c8635bd61237 100644 --- a/maths/aliquot_sum.py +++ b/maths/aliquot_sum.py @@ -9,7 +9,7 @@ def aliquot_sum(input_num: int) -> int: @return: the aliquot sum of input_num, if input_num is positive. Otherwise, raise a ValueError Wikipedia Explanation: https://en.wikipedia.org/wiki/Aliquot_sum - + >>> aliquot_sum(15) 9 >>> aliquot_sum(6) diff --git a/maths/allocation_number.py b/maths/allocation_number.py index fd002b0c4361..c6f1e562f878 100644 --- a/maths/allocation_number.py +++ b/maths/allocation_number.py @@ -4,8 +4,8 @@ def allocation_num(number_of_bytes: int, partitions: int) -> List[str]: """ Divide a number of bytes into x partitions. - - In a multi-threaded download, this algorithm could be used to provide + + In a multi-threaded download, this algorithm could be used to provide each worker thread with a block of non-overlapping bytes to download. For example: for i in allocation_list: diff --git a/maths/bailey_borwein_plouffe.py b/maths/bailey_borwein_plouffe.py index 50a53c793867..be97acfd063e 100644 --- a/maths/bailey_borwein_plouffe.py +++ b/maths/bailey_borwein_plouffe.py @@ -1,16 +1,16 @@ def bailey_borwein_plouffe(digit_position: int, precision: int = 1000) -> str: """ - Implement a popular pi-digit-extraction algorithm known as the + Implement a popular pi-digit-extraction algorithm known as the Bailey-Borwein-Plouffe (BBP) formula to calculate the nth hex digit of pi. 
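machine_learning/polymonial_regression.py above only moves its sklearn imports to the top of the file; the fitting pattern is unchanged. A self-contained sketch of the same PolynomialFeatures-plus-LinearRegression pipeline on synthetic data (the quadratic below is an arbitrary example, not the CSV the file loads):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(10, dtype=float).reshape(-1, 1)
y = 3.0 * X.ravel() ** 2 + 2.0 * X.ravel() + 1.0    # noiseless quadratic target

poly = PolynomialFeatures(degree=2)
X_poly = poly.fit_transform(X)                      # columns: 1, x, x^2
model = LinearRegression().fit(X_poly, y)

print(model.predict(poly.transform([[10.0]])))      # ~[321.] since 3*100 + 2*10 + 1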
Wikipedia page: https://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula - @param digit_position: a positive integer representing the position of the digit to extract. + @param digit_position: a positive integer representing the position of the digit to extract. The digit immediately after the decimal point is located at position 1. @param precision: number of terms in the second summation to calculate. A higher number reduces the chance of an error but increases the runtime. @return: a hexadecimal digit representing the digit at the nth position in pi's decimal expansion. - + >>> "".join(bailey_borwein_plouffe(i) for i in range(1, 11)) '243f6a8885' >>> bailey_borwein_plouffe(5, 10000) @@ -59,11 +59,11 @@ def _subsum( # only care about first digit of fractional part; don't need decimal """ Private helper function to implement the summation - functionality. + functionality. @param digit_pos_to_extract: digit position to extract @param denominator_addend: added to denominator of fractions in the formula @param precision: same as precision in main function - @return: floating-point number whose integer part is not important + @return: floating-point number whose integer part is not important """ sum = 0.0 for sum_index in range(digit_pos_to_extract + precision): diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index d3eb6e756dcd..6ace77312732 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -18,8 +18,9 @@ def collatz_sequence(n: int) -> List[int]: Traceback (most recent call last): ... Exception: Sequence only defined for natural numbers - >>> collatz_sequence(43) - [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1] + >>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE + [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, + 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1] """ if not isinstance(n, int) or n < 1: diff --git a/maths/find_max_recursion.py b/maths/find_max_recursion.py index fc10ecf3757a..03fb81950dcb 100644 --- a/maths/find_max_recursion.py +++ b/maths/find_max_recursion.py @@ -6,7 +6,7 @@ def find_max(nums, left, right): :param left: index of first element :param right: index of last element :return: max in nums - + >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] >>> find_max(nums, 0, len(nums) - 1) == max(nums) True diff --git a/maths/gamma.py b/maths/gamma.py index ef5e7dae6187..98b327fa2f99 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -6,8 +6,8 @@ def gamma(num: float) -> float: """ https://en.wikipedia.org/wiki/Gamma_function - In mathematics, the gamma function is one commonly - used extension of the factorial function to complex numbers. + In mathematics, the gamma function is one commonly + used extension of the factorial function to complex numbers. The gamma function is defined for all complex numbers except the non-positive integers @@ -16,7 +16,7 @@ def gamma(num: float) -> float: ... ValueError: math domain error - + >>> gamma(0) Traceback (most recent call last): @@ -27,12 +27,12 @@ def gamma(num: float) -> float: >>> gamma(9) 40320.0 - >>> from math import gamma as math_gamma + >>> from math import gamma as math_gamma >>> all(gamma(i)/math_gamma(i) <= 1.000000001 and abs(gamma(i)/math_gamma(i)) > .99999999 for i in range(1, 50)) True - >>> from math import gamma as math_gamma + >>> from math import gamma as math_gamma >>> gamma(-1)/math_gamma(-1) <= 1.000000001 Traceback (most recent call last): ... 
@@ -40,7 +40,7 @@ def gamma(num: float) -> float: >>> from math import gamma as math_gamma - >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001 + >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001 True """ diff --git a/maths/gaussian.py b/maths/gaussian.py index ffea20fb2ba1..edd52d1a4b2c 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -12,7 +12,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: """ >>> gaussian(1) 0.24197072451914337 - + >>> gaussian(24) 3.342714441794458e-126 @@ -25,7 +25,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: 1.33830226e-04, 1.48671951e-06, 6.07588285e-09, 9.13472041e-12, 5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27, 2.14638374e-32, 7.99882776e-38, 1.09660656e-43]) - + >>> gaussian(15) 5.530709549844416e-50 diff --git a/maths/is_square_free.py b/maths/is_square_free.py index acc13fa5f833..6d27d0af3387 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -13,12 +13,12 @@ def is_square_free(factors: List[int]) -> bool: returns True if the factors are square free. >>> is_square_free([1, 1, 2, 3, 4]) False - + These are wrong but should return some value it simply checks for repition in the numbers. >>> is_square_free([1, 3, 4, 'sd', 0.0]) True - + >>> is_square_free([1, 0.5, 2, 0.0]) True >>> is_square_free([1, 2, 2, 5]) diff --git a/maths/kth_lexicographic_permutation.py b/maths/kth_lexicographic_permutation.py index 1820be7274e3..491c1c84fa85 100644 --- a/maths/kth_lexicographic_permutation.py +++ b/maths/kth_lexicographic_permutation.py @@ -1,15 +1,15 @@ def kthPermutation(k, n): """ - Finds k'th lexicographic permutation (in increasing order) of + Finds k'th lexicographic permutation (in increasing order) of 0,1,2,...n-1 in O(n^2) time. - + Examples: First permutation is always 0,1,2,...n >>> kthPermutation(0,5) [0, 1, 2, 3, 4] - + The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3], - [0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3], + [0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3], [1,2,3,0], [1,3,0,2] >>> kthPermutation(10,4) [1, 3, 0, 2] diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 8dac658f16d1..33e4a2141efc 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -1,12 +1,12 @@ """ In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test - + A Mersenne number is a number that is one less than a power of two. That is M_p = 2^p - 1 https://en.wikipedia.org/wiki/Mersenne_prime - - The Lucas–Lehmer test is the primality test used by the + + The Lucas–Lehmer test is the primality test used by the Great Internet Mersenne Prime Search (GIMPS) to locate large primes. """ @@ -17,10 +17,10 @@ def lucas_lehmer_test(p: int) -> bool: """ >>> lucas_lehmer_test(p=7) True - + >>> lucas_lehmer_test(p=11) False - + # M_11 = 2^11 - 1 = 2047 = 23 * 89 """ diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py index 56d03b56fc1f..574269050fd8 100644 --- a/maths/matrix_exponentiation.py +++ b/maths/matrix_exponentiation.py @@ -4,7 +4,7 @@ """ Matrix Exponentiation is a technique to solve linear recurrences in logarithmic time. 
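maths/lucas_lehmer_primality_test.py's docstring (reflowed above) describes testing Mersenne numbers M_p = 2^p - 1, as used by GIMPS. A compact sketch of the test itself, assuming p is prime (the standard precondition) and special-casing p = 2:

def lucas_lehmer(p: int) -> bool:
    """True if the Mersenne number 2**p - 1 is prime, for prime p."""
    if p == 2:
        return True                      # M_2 = 3 is prime but needs a special case
    m = 2 ** p - 1
    s = 4
    for _ in range(p - 2):
        s = (s * s - 2) % m              # the Lucas-Lehmer recurrence modulo M_p
    return s == 0

print([p for p in (3, 5, 7, 11, 13) if lucas_lehmer(p)])   # [3, 5, 7, 13]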
-You read more about it here: +You read more about it here: http://zobayer.blogspot.com/2010/11/matrix-exponentiation.html https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/ """ diff --git a/maths/modular_exponential.py b/maths/modular_exponential.py index 8b7b17575a33..9cb171477ff0 100644 --- a/maths/modular_exponential.py +++ b/maths/modular_exponential.py @@ -1,6 +1,6 @@ """ Modular Exponential. - Modular exponentiation is a type of exponentiation performed over a modulus. + Modular exponentiation is a type of exponentiation performed over a modulus. For more explanation, please check https://en.wikipedia.org/wiki/Modular_exponentiation """ diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index dedca9f6cdf5..28027cbe4178 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -45,13 +45,13 @@ def area_under_curve_estimator( ) -> float: """ An implementation of the Monte Carlo method to find area under - a single variable non-negative real-valued continuous function, - say f(x), where x lies within a continuous bounded interval, - say [min_value, max_value], where min_value and max_value are + a single variable non-negative real-valued continuous function, + say f(x), where x lies within a continuous bounded interval, + say [min_value, max_value], where min_value and max_value are finite numbers - 1. Let x be a uniformly distributed random variable between min_value to + 1. Let x be a uniformly distributed random variable between min_value to max_value - 2. Expected value of f(x) = + 2. Expected value of f(x) = (integrate f(x) from min_value to max_value)/(max_value - min_value) 3. Finding expected value of f(x): a. Repeatedly draw x from uniform distribution diff --git a/maths/newton_raphson.py b/maths/newton_raphson.py index c4975c73e037..7b16d2dd9b2e 100644 --- a/maths/newton_raphson.py +++ b/maths/newton_raphson.py @@ -24,7 +24,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa a = x0 # set the initial guess steps = [a] error = abs(f(a)) - f1 = lambda x: calc_derivative(f, x, h=step) # Derivative of f(x) + f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x) for _ in range(maxiter): if f1(a) == 0: raise ValueError("No converging solution found") @@ -44,7 +44,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa if __name__ == "__main__": import matplotlib.pyplot as plt - f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) + f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731 solution, error, steps = newton_raphson( f, x0=10, maxiter=1000, step=1e-6, logsteps=True ) diff --git a/maths/prime_factors.py b/maths/prime_factors.py index eb3de00de6a7..34795dd98d1a 100644 --- a/maths/prime_factors.py +++ b/maths/prime_factors.py @@ -7,7 +7,7 @@ def prime_factors(n: int) -> List[int]: """ Returns prime factors of n as a list. 
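maths/monte_carlo.py's docstring above spells out the estimator: the integral over [min_value, max_value] equals (max_value - min_value) times the expected value of f(x) under a uniform draw. A short sketch of exactly that average, with an arbitrary integrand chosen for the demo:

import random

def area_under_curve(f, a, b, samples=100_000):
    """Monte Carlo estimate of the integral of f on [a, b]:
    (b - a) times the mean of f at uniformly drawn points."""
    total = sum(f(random.uniform(a, b)) for _ in range(samples))
    return (b - a) * total / samples

random.seed(0)
print(area_under_curve(lambda x: x * x, 0.0, 3.0))   # close to 9.0, the exact integral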
- + >>> prime_factors(0) [] >>> prime_factors(100) diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py index 05363cf62953..0ebdfdb94e15 100644 --- a/maths/prime_sieve_eratosthenes.py +++ b/maths/prime_sieve_eratosthenes.py @@ -1,11 +1,13 @@ +# flake8: noqa + """ Sieve of Eratosthenes Input : n =10 -Output : 2 3 5 7 +Output: 2 3 5 7 Input : n = 20 -Output: 2 3 5 7 11 13 17 19 +Output: 2 3 5 7 11 13 17 19 you can read in detail about this at https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes diff --git a/maths/radians.py b/maths/radians.py index 3788b3e8a3a0..465467a3ba08 100644 --- a/maths/radians.py +++ b/maths/radians.py @@ -14,8 +14,8 @@ def radians(degree: float) -> float: 4.782202150464463 >>> radians(109.82) 1.9167205845401725 - - >>> from math import radians as math_radians + + >>> from math import radians as math_radians >>> all(abs(radians(i)-math_radians(i)) <= 0.00000001 for i in range(-2, 361)) True """ diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 3911fea1d04d..de87071e5440 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -12,36 +12,36 @@ class FFT: Reference: https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm#The_radix-2_DIT_case - - For polynomials of degree m and n the algorithms has complexity + + For polynomials of degree m and n the algorithms has complexity O(n*logn + m*logm) - + The main part of the algorithm is split in two parts: - 1) __DFT: We compute the discrete fourier transform (DFT) of A and B using a - bottom-up dynamic approach - + 1) __DFT: We compute the discrete fourier transform (DFT) of A and B using a + bottom-up dynamic approach - 2) __multiply: Once we obtain the DFT of A*B, we can similarly invert it to obtain A*B - The class FFT takes two polynomials A and B with complex coefficients as arguments; + The class FFT takes two polynomials A and B with complex coefficients as arguments; The two polynomials should be represented as a sequence of coefficients starting - from the free term. Thus, for instance x + 2*x^3 could be represented as - [0,1,0,2] or (0,1,0,2). The constructor adds some zeros at the end so that the - polynomials have the same length which is a power of 2 at least the length of - their product. - + from the free term. Thus, for instance x + 2*x^3 could be represented as + [0,1,0,2] or (0,1,0,2). The constructor adds some zeros at the end so that the + polynomials have the same length which is a power of 2 at least the length of + their product. + Example: - + Create two polynomials as sequences >>> A = [0, 1, 0, 2] # x+2x^3 >>> B = (2, 3, 4, 0) # 2+3x+4x^2 - + Create an FFT object with them >>> x = FFT(A, B) - + Print product >>> print(x.product) # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5 [(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)] - + __str__ test >>> print(x) A = 0*x^0 + 1*x^1 + 2*x^0 + 3*x^2 diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index 4761c9339ea0..9f2960dc134a 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -16,7 +16,7 @@ def sieve(n): """ Returns a list with all prime numbers up to n. 
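maths/radix2_fft.py's docstring (reflowed above) fixes the coefficient convention: lowest-degree term first, as in A = [0, 1, 0, 2] for x + 2x^3. A quick cross-check of that convention using numpy's general FFT rather than the radix-2 class in the file:

import numpy as np

def multiply(a, b):
    """Coefficients of the product polynomial, lowest degree first."""
    n = len(a) + len(b) - 1                      # length bound of the product
    fa = np.fft.fft(a, n)                        # pointwise values of A
    fb = np.fft.fft(b, n)                        # pointwise values of B
    return np.rint(np.fft.ifft(fa * fb).real).astype(int)

print(multiply([0, 1, 0, 2], [2, 3, 4, 0]))      # [0 2 3 8 6 8 0]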
- + >>> sieve(50) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] >>> sieve(25) @@ -31,7 +31,7 @@ def sieve(n): [] """ - l = [True] * (n + 1) + l = [True] * (n + 1) # noqa: E741 prime = [] start = 2 end = int(math.sqrt(n)) diff --git a/maths/square_root.py b/maths/square_root.py index d4c5e311b0b7..fe775828c8c5 100644 --- a/maths/square_root.py +++ b/maths/square_root.py @@ -24,10 +24,10 @@ def square_root_iterative( """ Square root is aproximated using Newtons method. https://en.wikipedia.org/wiki/Newton%27s_method - + >>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001 for i in range(0, 500)) True - + >>> square_root_iterative(-1) Traceback (most recent call last): ... diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 257cf33712d5..91a70d189fc1 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -11,7 +11,7 @@ def __init__(self, row: int, column: int, default_value: float = 0): Example: >>> a = Matrix(2, 3, 1) - >>> a + >>> a Matrix consist of 2 rows and 3 columns [1, 1, 1] [1, 1, 1] @@ -186,10 +186,10 @@ def transpose(self): Example: >>> a = Matrix(2, 3) - >>> for r in range(2): + >>> for r in range(2): ... for c in range(3): ... a[r,c] = r*c - ... + ... >>> a.transpose() Matrix consist of 3 rows and 2 columns [0, 0] @@ -209,14 +209,14 @@ def ShermanMorrison(self, u, v): Apply Sherman-Morrison formula in O(n^2). To learn this formula, please look this: https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula This method returns (A + uv^T)^(-1) where A^(-1) is self. Returns None if it's impossible to calculate. - Warning: This method doesn't check if self is invertible. + Warning: This method doesn't check if self is invertible. Make sure self is invertible before execute this method. Example: >>> ainv = Matrix(3, 3, 0) >>> for i in range(3): ainv[i,i] = 1 - ... - >>> u = Matrix(3, 1, 0) + ... 
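maths/square_root.py approximates square roots with Newton's method. The underlying iteration x -> (x + a/x)/2 as a tiny sketch, with a loosely chosen iteration count instead of the file's error and iteration parameters:

def newton_sqrt(a: float, iterations: int = 20) -> float:
    """Approximate sqrt(a) by iterating x -> (x + a / x) / 2."""
    if a < 0:
        raise ValueError("math domain error")
    if a == 0:
        return 0.0
    x = a if a >= 1 else 1.0             # any positive starting guess converges
    for _ in range(iterations):
        x = (x + a / x) / 2
    return x

print(newton_sqrt(2))     # 1.41421356...
print(newton_sqrt(49))    # 7.0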
+ >>> u = Matrix(3, 1, 0) >>> u[0,0], u[1,0], u[2,0] = 1, 2, -3 >>> v = Matrix(3, 1, 0) >>> v[0,0], v[1,0], v[2,0] = 4, -2, 5 diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 0028c7cc577f..96b782649774 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -16,7 +16,7 @@ def BFS(graph, s, t, parent): while queue: u = queue.pop(0) for ind in range(len(graph[u])): - if visited[ind] == False and graph[u][ind] > 0: + if visited[ind] is False and graph[u][ind] > 0: queue.append(ind) visited[ind] = True parent[ind] = u diff --git a/networking_flow/minimum_cut.py b/networking_flow/minimum_cut.py index 0f6781fbb88c..d79f3619caf1 100644 --- a/networking_flow/minimum_cut.py +++ b/networking_flow/minimum_cut.py @@ -19,7 +19,7 @@ def BFS(graph, s, t, parent): while queue: u = queue.pop(0) for ind in range(len(graph[u])): - if visited[ind] == False and graph[u][ind] > 0: + if visited[ind] is False and graph[u][ind] > 0: queue.append(ind) visited[ind] = True parent[ind] = u diff --git a/other/activity_selection.py b/other/activity_selection.py index 4e8e6c78e3f5..8876eb2930fc 100644 --- a/other/activity_selection.py +++ b/other/activity_selection.py @@ -1,7 +1,9 @@ -"""The following implementation assumes that the activities +# flake8: noqa + +"""The following implementation assumes that the activities are already sorted according to their finish time""" -"""Prints a maximum set of activities that can be done by a +"""Prints a maximum set of activities that can be done by a single person, one at a time""" # n --> Total number of activities # start[]--> An array that contains start time of all activities @@ -10,8 +12,8 @@ def printMaxActivities(start, finish): """ - >>> start = [1, 3, 0, 5, 8, 5] - >>> finish = [2, 4, 6, 7, 9, 9] + >>> start = [1, 3, 0, 5, 8, 5] + >>> finish = [2, 4, 6, 7, 9, 9] >>> printMaxActivities(start, finish) The following activities are selected: 0 1 3 4 diff --git a/other/anagrams.py b/other/anagrams.py index 471413194498..0be013d5bc47 100644 --- a/other/anagrams.py +++ b/other/anagrams.py @@ -1,4 +1,7 @@ -import collections, pprint, time, os +import collections +import os +import pprint +import time start_time = time.time() print("creating word list...") diff --git a/other/detecting_english_programmatically.py b/other/detecting_english_programmatically.py index 4b0bb37ce520..44fb7191866b 100644 --- a/other/detecting_english_programmatically.py +++ b/other/detecting_english_programmatically.py @@ -55,6 +55,7 @@ def isEnglish(message, wordPercentage=20, letterPercentage=85): return wordsMatch and lettersMatch -import doctest +if __name__ == "__main__": + import doctest -doctest.testmod() + doctest.testmod() diff --git a/other/dijkstra_bankers_algorithm.py b/other/dijkstra_bankers_algorithm.py index 1f78941d3afc..ab4fba4c3bd1 100644 --- a/other/dijkstra_bankers_algorithm.py +++ b/other/dijkstra_bankers_algorithm.py @@ -145,7 +145,7 @@ def main(self, **kwargs) -> None: Process 5 is executing. Updated available resource stack for processes: 8 5 9 7 The process is in a safe state. 
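matrix/sherman_morrison.py's ShermanMorrison method computes (A + uv^T)^(-1) from A^(-1). A numpy check of the formula itself, reusing the u and v from the doctest above (where A is the identity) instead of the file's Matrix class:

import numpy as np

ainv = np.eye(3)                                    # A is the identity, so A^-1 = I
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

denominator = 1.0 + (v.T @ ainv @ u).item()         # 1 + v^T A^-1 u, a scalar
result = ainv - (ainv @ u @ v.T @ ainv) / denominator

print(np.allclose(result, np.linalg.inv(np.eye(3) + u @ v.T)))   # True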
- + """ need_list = self.__need() alloc_resources_table = self.__allocated_resources_table diff --git a/other/game_of_life.py b/other/game_of_life.py index 2b4d1116fa8c..688ee1f282b3 100644 --- a/other/game_of_life.py +++ b/other/game_of_life.py @@ -1,4 +1,4 @@ -"""Conway's Game Of Life, Author Anurag Kumar(mailto:anuragkumarak95@gmail.com) +"""Conway's Game Of Life, Author Anurag Kumar(mailto:anuragkumarak95@gmail.com) Requirements: - numpy @@ -13,7 +13,7 @@ - $python3 game_o_life Game-Of-Life Rules: - + 1. Any live cell with fewer than two live neighbours dies, as if caused by under-population. @@ -27,8 +27,10 @@ Any dead cell with exactly three live neighbours be- comes a live cell, as if by reproduction. """ +import random +import sys + import numpy as np -import random, sys from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap diff --git a/other/integeration_by_simpson_approx.py b/other/integeration_by_simpson_approx.py index 0f7bfacf030a..f88d3a0f0173 100644 --- a/other/integeration_by_simpson_approx.py +++ b/other/integeration_by_simpson_approx.py @@ -43,14 +43,14 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo Returns: result : the value of the approximated integration of function in range a to b - + Raises: AssertionError: function is not callable AssertionError: a is not float or integer AssertionError: function should return float or integer AssertionError: b is not float or integer AssertionError: precision is not positive integer - + >>> simpson_integration(lambda x : x*x,1,2,3) 2.333 @@ -72,7 +72,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo Traceback (most recent call last): ... AssertionError: the function(object) passed should be callable your input : wrong_input - + >>> simpson_integration(lambda x : x*x,3.45,3.2,1) -2.8 @@ -85,7 +85,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo Traceback (most recent call last): ... 
AssertionError: precision should be positive integer your input : -1 - + """ assert callable( function diff --git a/other/magicdiamondpattern.py b/other/magicdiamondpattern.py index 4ca698d80c28..37b5e4809f47 100644 --- a/other/magicdiamondpattern.py +++ b/other/magicdiamondpattern.py @@ -1,5 +1,6 @@ # Python program for generating diamond pattern in Python 3.7+ + # Function to print upper half of diamond (pyramid) def floyd(n): """ diff --git a/other/sdes.py b/other/sdes.py index 3038ff193ae9..cfc5a53df2b2 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = XOR(temp, key) - l = apply_sbox(s0, temp[:4]) + l = apply_sbox(s0, temp[:4]) # noqa: E741 r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l + l = "0" * (2 - len(l)) + l # noqa: E741 r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) temp = XOR(left, temp) diff --git a/project_euler/problem_02/sol4.py b/project_euler/problem_02/sol4.py index 92ea0a51e026..be4328941aa3 100644 --- a/project_euler/problem_02/sol4.py +++ b/project_euler/problem_02/sol4.py @@ -48,7 +48,7 @@ def solution(n): """ try: n = int(n) - except (TypeError, ValueError) as e: + except (TypeError, ValueError): raise TypeError("Parameter n must be int or passive of cast to int.") if n <= 0: raise ValueError("Parameter n must be greater or equal to one.") diff --git a/project_euler/problem_03/sol1.py b/project_euler/problem_03/sol1.py index 9f8ecc5e6565..347f8a53f5b4 100644 --- a/project_euler/problem_03/sol1.py +++ b/project_euler/problem_03/sol1.py @@ -50,7 +50,7 @@ def solution(n): """ try: n = int(n) - except (TypeError, ValueError) as e: + except (TypeError, ValueError): raise TypeError("Parameter n must be int or passive of cast to int.") if n <= 0: raise ValueError("Parameter n must be greater or equal to one.") diff --git a/project_euler/problem_03/sol2.py b/project_euler/problem_03/sol2.py index b6fad079fa31..daac041d4bd9 100644 --- a/project_euler/problem_03/sol2.py +++ b/project_euler/problem_03/sol2.py @@ -37,7 +37,7 @@ def solution(n): """ try: n = int(n) - except (TypeError, ValueError) as e: + except (TypeError, ValueError): raise TypeError("Parameter n must be int or passive of cast to int.") if n <= 0: raise ValueError("Parameter n must be greater or equal to one.") diff --git a/project_euler/problem_05/sol1.py b/project_euler/problem_05/sol1.py index b3a231f4dcf5..f8d83fc12b71 100644 --- a/project_euler/problem_05/sol1.py +++ b/project_euler/problem_05/sol1.py @@ -41,7 +41,7 @@ def solution(n): """ try: n = int(n) - except (TypeError, ValueError) as e: + except (TypeError, ValueError): raise TypeError("Parameter n must be int or passive of cast to int.") if n <= 0: raise ValueError("Parameter n must be greater or equal to one.") diff --git a/project_euler/problem_07/sol2.py b/project_euler/problem_07/sol2.py index 6bfc5881f548..ec182b835c84 100644 --- a/project_euler/problem_07/sol2.py +++ b/project_euler/problem_07/sol2.py @@ -50,7 +50,7 @@ def solution(n): """ try: n = int(n) - except (TypeError, ValueError) as e: + except (TypeError, ValueError): raise TypeError("Parameter n must be int or passive of cast to int.") if n <= 0: raise ValueError("Parameter n must be greater or equal to one.") diff --git a/project_euler/problem_08/sol1.py b/project_euler/problem_08/sol1.py index e7582d46c351..1cccdb8c85d6 100644 --- a/project_euler/problem_08/sol1.py +++ b/project_euler/problem_08/sol1.py @@ -53,7 +53,7 @@ def 
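other/integeration_by_simpson_approx.py (docstring reflowed above) applies Simpson's rule. The standard composite form as a short sketch, using a plain even subinterval count n rather than the file's precision argument:

def simpson(f, a: float, b: float, n: int = 1_000) -> float:
    """Composite Simpson's rule with n (even) subintervals:
    h/3 * [f(x0) + 4*f(x1) + 2*f(x2) + ... + 4*f(x_{n-1}) + f(xn)]."""
    if n % 2:
        n += 1                            # Simpson's rule needs an even interval count
    h = (b - a) / n
    total = f(a) + f(b)
    for i in range(1, n):
        total += (4 if i % 2 else 2) * f(a + i * h)
    return total * h / 3

print(round(simpson(lambda x: x * x, 1, 2), 3))   # 2.333, the exact value being 7/3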
solution(n): """Find the thirteen adjacent digits in the 1000-digit number n that have the greatest product and returns it. - + >>> solution(N) 23514624000 """ diff --git a/project_euler/problem_08/sol2.py b/project_euler/problem_08/sol2.py index bf8afa8379ee..60bd8254f2c3 100644 --- a/project_euler/problem_08/sol2.py +++ b/project_euler/problem_08/sol2.py @@ -56,7 +56,7 @@ def solution(n): """Find the thirteen adjacent digits in the 1000-digit number n that have the greatest product and returns it. - + >>> solution(N) 23514624000 """ diff --git a/project_euler/problem_08/sol3.py b/project_euler/problem_08/sol3.py index dfbef5755dd7..f3e87c6d3436 100644 --- a/project_euler/problem_08/sol3.py +++ b/project_euler/problem_08/sol3.py @@ -60,7 +60,7 @@ def streval(s: str) -> int: def solution(n: str) -> int: """Find the thirteen adjacent digits in the 1000-digit number n that have the greatest product and returns it. - + >>> solution(N) 23514624000 """ diff --git a/project_euler/problem_11/sol2.py b/project_euler/problem_11/sol2.py index 64702e852b0f..1482fc7d3b04 100644 --- a/project_euler/problem_11/sol2.py +++ b/project_euler/problem_11/sol2.py @@ -34,7 +34,7 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] + l = [] # noqa: E741 for i in range(20): l.append([int(x) for x in f.readline().split()]) diff --git a/project_euler/problem_16/sol2.py b/project_euler/problem_16/sol2.py index 88672e9a9e54..cd724d89a9e3 100644 --- a/project_euler/problem_16/sol2.py +++ b/project_euler/problem_16/sol2.py @@ -7,7 +7,7 @@ def solution(power): """Returns the sum of the digits of the number 2^power. - + >>> solution(1000) 1366 >>> solution(50) diff --git a/project_euler/problem_234/sol1.py b/project_euler/problem_234/sol1.py index 28d82b550c85..b65a506d1def 100644 --- a/project_euler/problem_234/sol1.py +++ b/project_euler/problem_234/sol1.py @@ -40,7 +40,7 @@ def solution(n): """Returns the sum of all semidivisible numbers not exceeding n.""" semidivisible = [] for x in range(n): - l = [i for i in input().split()] + l = [i for i in input().split()] # noqa: E741 c2 = 1 while 1: if len(fib(l[0], l[1], c2)) < int(l[2]): diff --git a/project_euler/problem_30/soln.py b/project_euler/problem_30/soln.py index 9d45739845a3..829ddb0fb9cc 100644 --- a/project_euler/problem_30/soln.py +++ b/project_euler/problem_30/soln.py @@ -10,7 +10,7 @@ The sum of these numbers is 1634 + 8208 + 9474 = 19316. Find the sum of all the numbers that can be written as the sum of fifth powers of their digits. - + (9^5)=59,049‬ 59049*7=4,13,343 (which is only 6 digit number ) So, number greater than 9,99,999 are rejected diff --git a/project_euler/problem_31/sol2.py b/project_euler/problem_31/sol2.py index 1f006f1a1824..b390b5b1efe5 100644 --- a/project_euler/problem_31/sol2.py +++ b/project_euler/problem_31/sol2.py @@ -30,7 +30,7 @@ def solution(pence: int) -> int: - """Returns the number of different ways to make X pence using any number of coins. + """Returns the number of different ways to make X pence using any number of coins. The solution is based on dynamic programming paradigm in a bottom-up fashion. >>> solution(500) diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index 873e520cc9b4..bbdd4d6b039d 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -28,7 +28,7 @@ def next_term(a_i, k, i, n): is cached to greatly speed up the computation. 
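project_euler/problem_31/sol2.py above counts the ways to make a given number of pence with a bottom-up dynamic programme. The classic one-dimensional table for that count, sketched with the usual UK denominations and small targets (not the 500-pence doctest):

def count_ways(target: int, coins) -> int:
    """Bottom-up count of the ways to make `target` from unlimited `coins`."""
    ways = [0] * (target + 1)
    ways[0] = 1                           # one way to make 0: use no coins
    for coin in coins:                    # process coin by coin so order is ignored
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

print(count_ways(5, [1, 2, 5]))                            # 4
print(count_ways(10, [1, 2, 5, 10, 20, 50, 100, 200]))     # 11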
Arguments: - a_i -- array of digits starting from the one's place that represent + a_i -- array of digits starting from the one's place that represent the i-th term in the sequence k -- k when terms are written in the from a(i) = b*10^k + c. Term are calulcated until c > 10^k or the n-th term is reached. diff --git a/scheduling/first_come_first_served.py b/scheduling/first_come_first_served.py index f52c4243dec3..163f5257f361 100644 --- a/scheduling/first_come_first_served.py +++ b/scheduling/first_come_first_served.py @@ -28,7 +28,7 @@ def calculate_turnaround_times( ) -> List[int]: """ This function calculates the turnaround time of some processes. - Return: The time difference between the completion time and the + Return: The time difference between the completion time and the arrival time. Practically waiting_time + duration_time >>> calculate_turnaround_times([5, 10, 15], [0, 5, 15]) diff --git a/searches/simulated_annealing.py b/searches/simulated_annealing.py index c24adc1ddb41..6a4a8638632d 100644 --- a/searches/simulated_annealing.py +++ b/searches/simulated_annealing.py @@ -1,5 +1,7 @@ # https://en.wikipedia.org/wiki/Simulated_annealing -import math, random +import math +import random + from hill_climbing import SearchProblem diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 5ecc47644248..6fdee58cf5dc 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -12,6 +12,7 @@ # It is recommended for users to keep this number greater than or equal to 10. precision = 10 + # This is the linear search that will occur after the search space has become smaller. def lin_search(left, right, A, target): for i in range(left, right + 1): diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index ce80c6028729..be3499de13cd 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -1,9 +1,10 @@ # Python program for Bitonic Sort. Note that this program # works only when size of input is a power of 2. + # The parameter dir indicates the sorting direction, ASCENDING # or DESCENDING; if (a[i] > a[j]) agrees with the direction, -# then a[i] and a[j] are interchanged.*/ +# then a[i] and a[j] are interchanged. def compAndSwap(a, i, j, dire): if (dire == 1 and a[i] > a[j]) or (dire == 0 and a[i] < a[j]): a[i], a[j] = a[j], a[i] diff --git a/sorts/pigeon_sort.py b/sorts/pigeon_sort.py index cf900699bc8d..cc6205f804dc 100644 --- a/sorts/pigeon_sort.py +++ b/sorts/pigeon_sort.py @@ -1,11 +1,11 @@ """ This is an implementation of Pigeon Hole Sort. For doctests run following command: - + python3 -m doctest -v pigeon_sort.py or python -m doctest -v pigeon_sort.py - + For manual testing run: python pigeon_sort.py """ diff --git a/sorts/recursive_bubble_sort.py b/sorts/recursive_bubble_sort.py index 616044778a4a..79d706e6164d 100644 --- a/sorts/recursive_bubble_sort.py +++ b/sorts/recursive_bubble_sort.py @@ -21,7 +21,7 @@ def bubble_sort(list1): >>> bubble_sort(['z','a','y','b','x','c']) ['a', 'b', 'c', 'x', 'y', 'z'] - + """ for i, num in enumerate(list1): diff --git a/strings/boyer_moore_search.py b/strings/boyer_moore_search.py index bd777c7c7e05..4bd6aff27bf3 100644 --- a/strings/boyer_moore_search.py +++ b/strings/boyer_moore_search.py @@ -1,15 +1,15 @@ """ The algorithm finds the pattern in given text using following rule. -The bad-character rule considers the mismatched character in Text. -The next occurrence of that character to the left in Pattern is found, +The bad-character rule considers the mismatched character in Text. 
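scheduling/first_come_first_served.py defines turnaround time as waiting time plus duration. A minimal sketch of both quantities for first-come-first-served order, assuming every process arrives at time 0 (the function name here is this note's own):

def fcfs_times(durations):
    """Waiting and turnaround times when processes run in arrival order."""
    waiting = [0] * len(durations)
    for i in range(1, len(durations)):
        waiting[i] = waiting[i - 1] + durations[i - 1]    # previous process's finish time
    turnaround = [w + d for w, d in zip(waiting, durations)]
    return waiting, turnaround

print(fcfs_times([5, 10, 15]))    # ([0, 5, 15], [5, 15, 30])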
+The next occurrence of that character to the left in Pattern is found, -If the mismatched character occurs to the left in Pattern, -a shift is proposed that aligns text block and pattern. +If the mismatched character occurs to the left in Pattern, +a shift is proposed that aligns text block and pattern. -If the mismatched character does not occur to the left in Pattern, -a shift is proposed that moves the entirety of Pattern past -the point of mismatch in the text. +If the mismatched character does not occur to the left in Pattern, +a shift is proposed that moves the entirety of Pattern past +the point of mismatch in the text. If there no mismatch then the pattern matches with text block. @@ -27,12 +27,12 @@ def __init__(self, text, pattern): def match_in_pattern(self, char): """ finds the index of char in pattern in reverse order - Parameters : + Parameters : char (chr): character to be searched - + Returns : i (int): index of char from last in pattern - -1 (int): if char is not found in pattern + -1 (int): if char is not found in pattern """ for i in range(self.patLen - 1, -1, -1): @@ -43,9 +43,9 @@ def match_in_pattern(self, char): def mismatch_in_text(self, currentPos): """ finds the index of mis-matched character in text when compared with pattern from last - Parameters : + Parameters : currentPos (int): current index position of text - + Returns : i (int): index of mismatched char from last in text -1 (int): if there is no mismatch between pattern and text block diff --git a/strings/lower.py b/strings/lower.py index c3a6e598b9ea..222b8d443289 100644 --- a/strings/lower.py +++ b/strings/lower.py @@ -1,15 +1,15 @@ def lower(word: str) -> str: - """ - Will convert the entire string to lowecase letters - + """ + Will convert the entire string to lowecase letters + >>> lower("wow") 'wow' >>> lower("HellZo") 'hellzo' >>> lower("WHAT") 'what' - + >>> lower("wh[]32") 'wh[]32' >>> lower("whAT") diff --git a/strings/manacher.py b/strings/manacher.py index 95aba1fbe65d..73b31a7bea9f 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -9,9 +9,10 @@ def palindromic_string(input_string): 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. - 2. for each character in new_string it find corresponding length and store the length - and l,r to store previously calculated info.(please look the explanation for details) - + 2. for each character in new_string it find corresponding length and store the + length and l,r to store previously calculated info.(please look the explanation + for details) + 3. return corresponding output_string by removing all "|" """ max_length = 0 @@ -26,7 +27,8 @@ def palindromic_string(input_string): # append last character new_input_string += input_string[-1] - # we will store the starting and ending of previous furthest ending palindromic substring + # we will store the starting and ending of previous furthest ending palindromic + # substring l, r = 0, 0 # length[i] shows the length of palindromic substring with center i @@ -47,7 +49,7 @@ def palindromic_string(input_string): # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if i + k - 1 > r: - l = i - k + 1 + l = i - k + 1 # noqa: E741 r = i + k - 1 # update max_length and start position @@ -72,32 +74,34 @@ def palindromic_string(input_string): """ ...a0...a1...a2.....a3......a4...a5...a6.... 
-consider the string for which we are calculating the longest palindromic substring is shown above where ... -are some characters in between and right now we are calculating the length of palindromic substring with -center at a5 with following conditions : -i) we have stored the length of palindromic substring which has center at a3 (starts at l ends at r) and it - is the furthest ending till now, and it has ending after a6 +consider the string for which we are calculating the longest palindromic substring is +shown above where ... are some characters in between and right now we are calculating +the length of palindromic substring with center at a5 with following conditions : +i) we have stored the length of palindromic substring which has center at a3 (starts at + l ends at r) and it is the furthest ending till now, and it has ending after a6 ii) a2 and a4 are equally distant from a3 so char(a2) == char(a4) iii) a0 and a6 are equally distant from a3 so char(a0) == char(a6) -iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember that in below derivation of a4==a6) +iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember + that in below derivation of a4==a6) -now for a5 we will calculate the length of palindromic substring with center as a5 but can we use previously -calculated information in some way? -Yes, look the above string we know that a5 is inside the palindrome with center a3 and previously we have -have calculated that +now for a5 we will calculate the length of palindromic substring with center as a5 but +can we use previously calculated information in some way? +Yes, look the above string we know that a5 is inside the palindrome with center a3 and +previously we have have calculated that a0==a2 (palindrome of center a1) a2==a4 (palindrome of center a3) a0==a6 (palindrome of center a3) so a4==a6 -so we can say that palindrome at center a5 is at least as long as palindrome at center a1 -but this only holds if a0 and a6 are inside the limits of palindrome centered at a3 so finally .. +so we can say that palindrome at center a5 is at least as long as palindrome at center +a1 but this only holds if a0 and a6 are inside the limits of palindrome centered at a3 +so finally .. len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), r-a5) where a3 lies from l to r and we have to keep updating that -and if the a5 lies outside of l,r boundary we calculate length of palindrome with bruteforce and update -l,r. +and if the a5 lies outside of l,r boundary we calculate length of palindrome with +bruteforce and update l,r. 
it gives the linear time complexity just like z-function """ diff --git a/strings/reverse_words.py b/strings/reverse_words.py index 6b5cc6b04039..8ab060fe1d24 100644 --- a/strings/reverse_words.py +++ b/strings/reverse_words.py @@ -11,10 +11,7 @@ def reverse_words(input_str: str) -> str: >>> reverse_words(sentence) 'Python love I' """ - input_str = input_str.split(" ") - new_str = list() - - return " ".join(reversed(input_str)) + return " ".join(reversed(input_str.split(" "))) if __name__ == "__main__": diff --git a/strings/split.py b/strings/split.py index d5bff316429f..d614bd88478f 100644 --- a/strings/split.py +++ b/strings/split.py @@ -1,16 +1,16 @@ def split(string: str, separator: str = " ") -> list: """ Will split the string up into all the values separated by the separator (defaults to spaces) - + >>> split("apple#banana#cherry#orange",separator='#') ['apple', 'banana', 'cherry', 'orange'] - + >>> split("Hello there") ['Hello', 'there'] - + >>> split("11/22/63",separator = '/') ['11', '22', '63'] - + >>> split("12:43:39",separator = ":") ['12', '43', '39'] """ diff --git a/strings/upper.py b/strings/upper.py index 59b16096af0b..96b52878e05e 100644 --- a/strings/upper.py +++ b/strings/upper.py @@ -1,14 +1,14 @@ def upper(word: str) -> str: - """ - Will convert the entire string to uppercase letters - + """ + Will convert the entire string to uppercase letters + >>> upper("wow") 'WOW' >>> upper("Hello") 'HELLO' >>> upper("WHAT") 'WHAT' - + >>> upper("wh[]32") 'WH[]32' """ diff --git a/traversals/binary_tree_traversals.py b/traversals/binary_tree_traversals.py index c522ecebc0ff..7f100e66f200 100644 --- a/traversals/binary_tree_traversals.py +++ b/traversals/binary_tree_traversals.py @@ -1,3 +1,5 @@ +# flake8: noqa + """ This is pure Python implementation of tree traversal algorithms """ @@ -144,7 +146,7 @@ def level_order_actual(node: TreeNode) -> None: >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 - >>> level_order_actual(root) + >>> level_order_actual(root) 1 2 3 4 5 6 7
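traversals/binary_tree_traversals.py ends with a level-order doctest printing 1 2 3 4 5 6 7. The same breadth-first idea as a self-contained sketch, with a throwaway Node class standing in for the file's TreeNode:

from collections import deque

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def level_order(root):
    """Breadth-first (level order) traversal of a binary tree."""
    order, queue = [], deque([root] if root else [])
    while queue:
        node = queue.popleft()
        order.append(node.data)
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return order

root = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))
print(level_order(root))    # [1, 2, 3, 4, 5, 6, 7]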