diff --git a/notebooks/2_uninformed_search/images/Algorithms.png b/notebooks/2_uninformed_search/images/Algorithms.png deleted file mode 100644 index 578abdf6..00000000 Binary files a/notebooks/2_uninformed_search/images/Algorithms.png and /dev/null differ diff --git a/notebooks/2_uninformed_search/images/Breadth-First-Search.gif b/notebooks/2_uninformed_search/images/Breadth-First-Search.gif deleted file mode 100644 index 751a0c05..00000000 Binary files a/notebooks/2_uninformed_search/images/Breadth-First-Search.gif and /dev/null differ diff --git a/notebooks/2_uninformed_search/images/Conclusion.jpg b/notebooks/2_uninformed_search/images/Conclusion.jpg new file mode 100644 index 00000000..80165f6a Binary files /dev/null and b/notebooks/2_uninformed_search/images/Conclusion.jpg differ diff --git a/notebooks/2_uninformed_search/images/ID-dfs.jpg b/notebooks/2_uninformed_search/images/ID-dfs.jpg deleted file mode 100644 index 8ba2e714..00000000 Binary files a/notebooks/2_uninformed_search/images/ID-dfs.jpg and /dev/null differ diff --git a/notebooks/2_uninformed_search/images/IDDFS.jpg b/notebooks/2_uninformed_search/images/IDDFS.jpg new file mode 100644 index 00000000..055a46a3 Binary files /dev/null and b/notebooks/2_uninformed_search/images/IDDFS.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_1.jpg b/notebooks/2_uninformed_search/images/UCS_1.jpg new file mode 100644 index 00000000..57307828 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_1.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_2.jpg b/notebooks/2_uninformed_search/images/UCS_2.jpg new file mode 100644 index 00000000..fc1429b5 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_2.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_3.1.jpg b/notebooks/2_uninformed_search/images/UCS_3.1.jpg new file mode 100644 index 00000000..ef4a1ffb Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_3.1.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_3.jpg b/notebooks/2_uninformed_search/images/UCS_3.jpg new file mode 100644 index 00000000..1e6ae96f Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_3.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_4.jpg b/notebooks/2_uninformed_search/images/UCS_4.jpg new file mode 100644 index 00000000..dff512b1 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_4.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_5.jpg b/notebooks/2_uninformed_search/images/UCS_5.jpg new file mode 100644 index 00000000..9a52dc36 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_5.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_6.jpg b/notebooks/2_uninformed_search/images/UCS_6.jpg new file mode 100644 index 00000000..e5dabb94 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_6.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_7.jpg b/notebooks/2_uninformed_search/images/UCS_7.jpg new file mode 100644 index 00000000..68449cc5 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_7.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_8.jpg b/notebooks/2_uninformed_search/images/UCS_8.jpg new file mode 100644 index 00000000..6e2ab8e9 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_8.jpg differ diff --git a/notebooks/2_uninformed_search/images/UCS_9.jpg b/notebooks/2_uninformed_search/images/UCS_9.jpg new file mode 
100644 index 00000000..9f89b296 Binary files /dev/null and b/notebooks/2_uninformed_search/images/UCS_9.jpg differ diff --git a/notebooks/2_uninformed_search/images/Uniform-Cost-Search.png b/notebooks/2_uninformed_search/images/Uniform-Cost-Search.png deleted file mode 100644 index 7fd0b46f..00000000 Binary files a/notebooks/2_uninformed_search/images/Uniform-Cost-Search.png and /dev/null differ diff --git a/notebooks/2_uninformed_search/images/bfs.jpg b/notebooks/2_uninformed_search/images/bfs.jpg new file mode 100644 index 00000000..3aeead31 Binary files /dev/null and b/notebooks/2_uninformed_search/images/bfs.jpg differ diff --git a/notebooks/2_uninformed_search/images/bidir.png b/notebooks/2_uninformed_search/images/bidir.png new file mode 100644 index 00000000..82266ce2 Binary files /dev/null and b/notebooks/2_uninformed_search/images/bidir.png differ diff --git a/notebooks/2_uninformed_search/images/breadth-first-search.png b/notebooks/2_uninformed_search/images/breadth-first-search.png new file mode 100644 index 00000000..14cac50e Binary files /dev/null and b/notebooks/2_uninformed_search/images/breadth-first-search.png differ diff --git a/notebooks/2_uninformed_search/images/cities.png b/notebooks/2_uninformed_search/images/cities.png deleted file mode 100644 index 4a55c31d..00000000 Binary files a/notebooks/2_uninformed_search/images/cities.png and /dev/null differ diff --git a/notebooks/2_uninformed_search/images/depth-limited-search-algorithm.png b/notebooks/2_uninformed_search/images/depth-limited-search-algorithm.png new file mode 100644 index 00000000..e2c8ffb8 Binary files /dev/null and b/notebooks/2_uninformed_search/images/depth-limited-search-algorithm.png differ diff --git a/notebooks/2_uninformed_search/images/graph-dfs-step-0.webp b/notebooks/2_uninformed_search/images/graph-dfs-step-0.webp new file mode 100644 index 00000000..2efecead Binary files /dev/null and b/notebooks/2_uninformed_search/images/graph-dfs-step-0.webp differ diff --git a/notebooks/2_uninformed_search/images/graph-dfs-step-1.webp b/notebooks/2_uninformed_search/images/graph-dfs-step-1.webp new file mode 100644 index 00000000..456a4b6c Binary files /dev/null and b/notebooks/2_uninformed_search/images/graph-dfs-step-1.webp differ diff --git a/notebooks/2_uninformed_search/images/graph-dfs-step-2.webp b/notebooks/2_uninformed_search/images/graph-dfs-step-2.webp new file mode 100644 index 00000000..22b69ee0 Binary files /dev/null and b/notebooks/2_uninformed_search/images/graph-dfs-step-2.webp differ diff --git a/notebooks/2_uninformed_search/images/graph-dfs-step-3.webp b/notebooks/2_uninformed_search/images/graph-dfs-step-3.webp new file mode 100644 index 00000000..efe7f4cd Binary files /dev/null and b/notebooks/2_uninformed_search/images/graph-dfs-step-3.webp differ diff --git a/notebooks/2_uninformed_search/images/graph-dfs-step-4.webp b/notebooks/2_uninformed_search/images/graph-dfs-step-4.webp new file mode 100644 index 00000000..04956f33 Binary files /dev/null and b/notebooks/2_uninformed_search/images/graph-dfs-step-4.webp differ diff --git a/notebooks/2_uninformed_search/images/graph-dfs-step-5.webp b/notebooks/2_uninformed_search/images/graph-dfs-step-5.webp new file mode 100644 index 00000000..7081db4a Binary files /dev/null and b/notebooks/2_uninformed_search/images/graph-dfs-step-5.webp differ diff --git a/notebooks/2_uninformed_search/images/photo b/notebooks/2_uninformed_search/images/photo new file mode 100644 index 00000000..8b137891 --- /dev/null +++ 
b/notebooks/2_uninformed_search/images/photo @@ -0,0 +1 @@ + diff --git a/notebooks/2_uninformed_search/images/summary.png b/notebooks/2_uninformed_search/images/summary.png deleted file mode 100644 index f2149995..00000000 Binary files a/notebooks/2_uninformed_search/images/summary.png and /dev/null differ diff --git a/notebooks/2_uninformed_search/index.ipynb b/notebooks/2_uninformed_search/index.ipynb deleted file mode 100644 index 91ad68b0..00000000 --- a/notebooks/2_uninformed_search/index.ipynb +++ /dev/null @@ -1,595 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "

AI - Uninformed Search

\n", - "

Sharif University of Technology - Computer Engineering Department

\n", - "
\n", - "

Fateme Khashei, Hossein Sobhi, Ali asghar Ghanati

\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Probelm solving agents

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Consider that we are currently in the city Arad, and have a flight leaving tomorrow from Bucharest. We need to find the shortest path from Arad to Bucharest so that we get there in time (a path is a sequence of cities, like: Arad, Sibiu, Fagaras, bucharest). An AI agent can help us achieve this goal (finding the best path) by using search algorithms." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![cities](./images/cities.png \"cities\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Search strategies

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A search strategy is defined by picking the order of node expansion, (expansion means visiting a node in a graph and generating its successors)\n", - "strategies are evaluated by the following means:\n", - "\n", - "time and space complexity are measured in terms of:\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Uninformed Search" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Uninformed search is a class of general-purpose search algorithms, used in different data structures, algorithms, and AIs.\n", - "\n", - "Uninformed search algorithms do not have additional information about domain in which they are searching for a solution (mostly how far from the goal they are) other than how to traverse the tree, thats why they are called \"uninformed\".\n", - "\n", - "Uninformed search algorithms are also called blind search algorithms. The search algorithm produces the search tree without using any domain knowledge, which is a brute force in nature. They don’t have any background information on how to approach the goal or whatsoever. But these are the basics of search algorithms in AI.\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The diffrent type of search algorithms are as follows:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "1. Breadth-first Search\n", - "2. Uniform cost search\n", - "3. Depth-first Search\n", - "4. Depth-limited Search\n", - "5. Iterative deepening depth-first search" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Search Algorithms](./images/Algorithms.png \"Search Algorithms\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Breadth-First Search

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Breadth-first search is the most common search strategy for traversing a tree or graph. This algorithm searches breadthwise in a tree or graph, so it is called breadth-first search.\n", - "\n", - "BFS algorithm starts searching from the root node of the tree and expands all successor node at the current level before moving to nodes of next level.\n", - "\n", - "In the below tree structure, you can see the traversing of the tree using BFS algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![BFS](./images/Breadth-First-Search.gif \"Breadth-First Search\")\n", - "\n", - "It starts from the root node of the tree which is 1, then goes to the next level and expands 2, we still have two nodes at this level so it expandss those two nodes which are 3 and 4, then there would be no successor left in this level so we can expand the next level and the proccess will be the same which gives us 5, 6, 7, 8 and for the last level we have 9, 10." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Completeness:**\n", - "\n", - "BFS is complete, which means if the shallowest goal node is at some finite depth, then BFS will find a solution.\n", - "\n", - "**Time complexity:**\n", - "\n", - "Time Complexity of BFS algorithm can be obtained by the number of nodes traversed in BFS until the shallowest Node.\n", - "\n", - "Where the d = depth of shallowest solution and b is a node at every state.\n", - "\n", - "> T( b ) = b + b1 + b2 + ... + bd + b( bd - 1 ) = O( bd+1 )\n", - "\n", - "**Space complexity:**\n", - "\n", - "BFS algorithm requires a lot of memory space, because it keeps every node in memory.\n", - " \n", - "Space complexity of BFS is O( bd+1 ).\n", - "\n", - "**Optimality:**\n", - "\n", - "In general, BFS is not optimal.\n", - "\n", - "But BFS is optimal if path cost is a non-decreasing function of the depth of the node e.g. `cost per step = 1`.\n", - "\n", - "**Pseudocode:**\n", - "```python\n", - "function BFS (problem ,graph, source)returns soln/fail\n", - " let Q be queue.\n", - " Q.enqueue( source )\n", - " mark source as visited\n", - " while ( Q is not empty)\n", - " node = Q.dequeue( )\n", - " if Goal-Test(problem, State[node]) then return node\n", - " for all successor in Expand(node, problem) do\n", - " if successor is not visited \n", - " Q.enqueue( successor )\n", - " mark successor as visited.\n", - " return failure\n", - "\n", - "```\n", - "\n", - "As you can see space is a big problem in this algorithm, it can easily generate nodes at 100MB/sec which means in 24 hours, 8640GB of data will be generated.\n", - "\n", - "BFS will return shortest path in terms of number of transitions, It doesn’t find the least cost path.\n", - "\n", - "This problem leads us to another search algorithm called Uniform Cost Search which is a generalization of BFS algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Uniform Cost Search

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Uniform cost search, also called dijkstra, is a searching algorithm used for traversing a weighted tree or graph. This algorithm comes into play when a different cost is available for each edge.\n", - "\n", - "The primary goal of the uniform cost search is to find a path to the goal node which has the lowest cumulative cost. \n", - "\n", - "Uniformcost search expands nodes according to their path costs form the root node. It can be used to solve any graph or tree where the optimal cost is in demand. It gives maximum priority to the lowest cumulative cost.\n", - "\n", - "Uniform cost search is equivalent to BFS algorithm if the path cost of all edges is the same.\n", - "\n", - "It should be noted that UCS does not care about the number of steps involved in searching and only concerned about path cost. Due to which this algorithm may be stuck in an infinite loop.\n", - "\n", - "In the below tree structure, you can see the traversing of the tree using UCS algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![UCS](./images/Uniform-Cost-Search.png \"Uniform Cost Search\")\n", - "The proccess of visiting the tree is similar to BFS except the fact that in BFS we use depth of the node to decide if we want to expand the node or not but in UFC we make the decision based on distance from root node.\n", - "\n", - "In this example we have the root node and then the next level in the tree is consisted of yellow nodes, next green nodes and then purple ones and it goes like that till the end." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Completeness:**\n", - "\n", - "Uniform-cost search is complete, if a goal state exists, UCS will find it because it must have some finite length shortest path.\n", - "\n", - "In other words, UCS is complete if the cost of each step exceeds some small positive integer, this to prevent infinite loops.\n", - "\n", - "**Time complexity:**\n", - "\n", - "Let C* be cost of the optimal solution, and ε be each step to get closer to the goal node. Then the number of steps is C\\*/ε .\n", - "\n", - "Hence, the worst-case time complexity of Uniform-cost search is O( bC\\*/ε ).\n", - "\n", - "**Space complexity:**\n", - "\n", - "The same logic is for space complexity so, the worst-case space complexity of Uniform-cost search is O( bC\\*/ε ).\n", - "\n", - "**Optimality:**\n", - "\n", - "Uniform cost search is optimal because at every state the path with the least cost is chosen.\n", - "\n", - "**Pseudocode:**\n", - "```python\n", - "function UCS (problem, graph, source)returns soln/fail\n", - " for each successor in graph do\n", - " Set-Infinity-Dist(successor)\n", - " let Q be queue.\n", - " Q.enqueue(graph)\n", - " Dist[source] <- 0\n", - " while ( Q is not empty)\n", - " node = Get-Min-Dist(Q)\n", - " Q.remove(node)\n", - " if Goal-Test(problem, State[node]) then return node\n", - " for all successor in Expand(node, problem) do\n", - " Set-Dist(successor, node)\n", - " return failure\n", - "\n", - "```\n", - "\n", - "\n", - "This algorithm explores options in every direction because it doesn't have any information about goal location, this problem will be discussed in informed search chapter.\n", - "\n", - "As we mentioned before space is a big problem in BFS and the problem still remains in UCS, to solve it we are going to talk about another search algorithm called Depth-First search." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Depth-First Search

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Depth first search is a recursive algorithm for traversing a tree or graph that expands nodes in one branch as deep as the branch goes before expanding nodes in other branches.\n", - "\n", - "It is called the depth-first search because it starts from the root node and follows each path to its greatest depth node before moving to the next path.\n", - "\n", - "In the below tree structure, you can see the traversing of the tree using DFS algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![DFS](./images/Depth-First-Search.gif \"Depth-First Search\")\n", - "It starts from the root node which is 1 then expands a child and goes as deep as it can in the tree, so we get 2, 3 and 4 then it can't go any deeper so it expands another child which is 5 and perform DFS on this node which gives us 6, 7 and 8.\n", - "\n", - "Proccess goes on until there is no node left." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Completeness:**\n", - "\n", - "DFS search algorithm is complete for graphs and trees in finite spaces (depths) as it will expand every node within a limited search tree.\n", - "\n", - "DFS for graphs with cycles needs to be modified e.g. keeping the record of visited nodes to avoid processing a node more than once and getting caught in an infinite loop.\n", - "\n", - "**Time complexity:**\n", - "\n", - "Time complexity of DFS will be equivalent to the node traversed by the algorithm.\n", - "\n", - "Let m = maximum depth of any node and this can be much larger than d (Shallowest solution depth).\n", - "\n", - "Time complexity of DFS is O( bm ) time which is terrible if m is much larger than d.\n", - "\n", - "**Space complexity:**\n", - "\n", - "DFS algorithm needs to store only single path from the root node, hence space complexity of DFS is equivalent to the size of the fringe set, which is O(bm). ( linear space! )\n", - "\n", - "**Optimality:**\n", - "\n", - "DFS search algorithm is not optimal, as it may generate a large number of steps to reach to the solution.\n", - "\n", - "**Pseudocode:**\n", - "```python\n", - "function DFS (problem ,graph, source)returns soln/fail\n", - " let S be stack.\n", - " S.push( source )\n", - " mark source as visited\n", - " while ( S is not empty)\n", - " node = S.pop( )\n", - " if Goal-Test(problem, State[node]) then return node\n", - " for all successor in Expand(node, problem) do\n", - " if successor is not visited \n", - " S.push( successor )\n", - " mark successor as visited.\n", - " return failure\n", - "\n", - "```\n", - "\n", - "So far we have introduced two algorithms:\n", - "- BFS which is better in time complexity\n", - "- DFS which is better in space complexity\n", - "\n", - "we are looking for a way to combine strength of both in a method." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Depth Limited Search

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A depth-limited search algorithm is similar to depth-first search with a with a depth limit.\n", - "\n", - "Depth limited search is limited to depth l, which means that nodes at depth l will treat as it has no successor nodes further.\n", - "\n", - "Depth-limited search can solve the drawback of the infinite path in the Depth-first search." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Completeness:**\n", - "\n", - "Depth-limited search algorithm is not complete, because it will not process nodes at depth deeper than l (depth limit).\n", - "\n", - "**Time complexity:**\n", - "\n", - "Time complexity of DLS algorithm is O( bl ).\n", - "\n", - "**Space complexity:**\n", - "\n", - "Space complexity of DLS algorithm is O( bl ).\n", - "\n", - "**Optimality:**\n", - "\n", - "Depth-limited search can be viewed as a special case of DFS, and it is also not optimal even if l > d. ( not complete means not optimal! )\n", - "\n", - "**Pseudocode:**\n", - "```python\n", - "function Depth-Limit-Search(problem, limit) returns soln/fail/cutoff\n", - " Recursive-DLS(Make-Node(Initial-State[problem]), problem, limit)\n", - " \n", - "function Recursive-DLS(node, problem, limit) returns soln/fail/cutoff\n", - " cutoff-occured? <- false\n", - " if Goal-Test(problem, State[node]) then return node\n", - " else if Depth[node] = limit then return cutoff\n", - " else for each successor in Expand(node, problem) do\n", - " result <- Recursive-DLS(successor, problem, limit)\n", - " if( ressult = cutoff ) then cutoff-occured? <- true \n", - " else if( result != failure ) then return result\n", - " if cutoff-occured? then return \n", - "\n", - "\n", - "\n", - "\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "---" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Iterative Deepening Search

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The iterative deepening algorithm is a combination of DFS and BFS algorithms. This search algorithm finds out the best depth limit and does it by gradually increasing the limit until a goal is found.\n", - "\n", - "This algorithm performs depth-first search up to a certain \"depth limit\", and it keeps increasing the depth limit after each iteration until the goal node is found.\n", - "\n", - "This Search algorithm combines the benefits of DFS's space-efficiency and BFS's completenessy.\n", - "\n", - "The iterative search algorithm is useful uninformed search when search space is large, and depth of goal node is unknown.\n", - "\n", - "In the below picture, you can see the traversing of a tree using iterative deepening algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![ID-dfs.jpg](./images/ID-dfs.jpg)\n", - "At first Limit is set to 0, it visits root node then limit is increased by 1 and we perform DFS for root and nodes with depth of 1.\n", - "\n", - "for limit l, we perform DFS on nodes with maximum depth l and this goes on until we reach the goal." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Completeness:**\n", - "\n", - "Iterative deepening search is complete, which means if branching factor is finite, then it will find a solution.\n", - "\n", - "**Time complexity:**\n", - "\n", - "Let's suppose b is the branching factor and depth is d then the worst-case time complexity is:\n", - "\n", - "> T( b ) = (d+1)b0 + db1 + (d−1)b2 + ... + bd = O( bd )\n", - "\n", - "or more percisely:\n", - "\n", - ">T( b ) = O( bd(1 – 1/b)-2 )\n", - "\n", - "In this algorithm because of the fact that we want to avoid space problems, we wont store any data therefor we may have to repeat some actions but it won't trouble us because time complexity still remains O( bd ), similar to BFS.\n", - "\n", - "**Space complexity:**\n", - "\n", - "The space complexity of IDDFS will be O( bd )\n", - "\n", - "**Optimality:**\n", - "\n", - "IDDFS algorithm is optimal if path cost is a non-decreasing function of the depth of the node e.g. `cost per step = 1`.\n", - "\n", - "**Pseudocode:**\n", - "```python\n", - "function Iterative-Deepening-Search(problem) returns a solution\n", - " inputs: problem, a problem\n", - " for depth <- 0 to ∞ do\n", - " result <- Depth-Limited-Search(problem, depth)\n", - " if result ≠ cutoff then return result\n", - " end\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

Summary of Algorithms

" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![summary.png](./images/summary.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Refrences:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- AI course teached by Dr. Rohban at Sharif University of Technology, Spring 2021\n", - "- https://www.javatpoint.com\n", - "- https://www.analyticsvidhya.com\n", - "- https://www.geeksforgeeks.org\n", - "- https://www.wikipedia.org" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.4" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/2_uninformed_search/index.md b/notebooks/2_uninformed_search/index.md new file mode 100644 index 00000000..44f19e98 --- /dev/null +++ b/notebooks/2_uninformed_search/index.md @@ -0,0 +1,282 @@ + +# Uninformed Search + +Every problem around us should be modeled and formulated in the first step if we want to solve them. After that we must find a solution and solution is an action sequence, in order to do that we must "search". This is where search algorithm are being used and developed. The search possible action sequences starting at the initial state form a search tree with the initial state node at the root; the branches are actions and the nodes correspond to states in the state space of the problem. the very first thing we have to do is to check that if the current state is the goal node or not. If it is not we must take various actions and we do this by expansion; expandind the current state. This would lead to generating a new set of states from parent node and new states are called child nodes. Depends on the search strategy we chose, we proceed to reach out the goal state and then we stop. + +# Contents + +[Introduction](#Introduction) + +[Breadth-first Search](#Breadth-first-Search) + +[Uniform cost search](#Uniform-cost-search) + +[Depth-first Search](#Depth-first-Search) + +[Depth-limited Search](#Depth-limited-Search) + +[Iterative deepening depth-first search](#Iterative-deepening-depth-first-search) + +[Bidirectional search](#Bidirectional-search) + +[Conclusion](#Conclusion) + +[References](#References) + + +## Introduction +As the name suggests, Uninformed search and algorithms try to reach the goal state blindly which means they don't have and save extra and additional information about search space. They operate in a brute force way and use no domain knowlege for operating and they just search and move forward, whether it is a right way and route or not until they reach the goal state. + +There are two kinds of search; Tree search and Graph search which is explained below. +In tree search, while using the main strategy, we consider expanded nodes again if it's neseccary or on the way. But in graph search we will not do that and the expanded nodes will be ignored and will not get expanded again. It is obvious that Graph search is much faster than tree search. + +Also two concepts need to be discussed which are Optimality and Time complexity. +Optimality criteria are the conditions a function must satisfy at its minimum point which in this concept is reaching out the goal state. 
+Time complexity is the amount of time an algorithm takes to run, as a function of the length of the input; it measures the work done to execute each step of the algorithm.
+
+## Breadth-first Search
+Breadth-first search (BFS) is one of the algorithms for searching a graph or a tree to find a specific node we are looking for. It is an uninformed search and keeps the frontier of discovered nodes in memory. The algorithm starts from the root node and explores all the nodes at the current level before moving on to the next level; in other words, it always chooses the shallowest unexpanded node for expansion. It shares its overall structure with DFS but differs in the order in which nodes are expanded.
+
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/bfs.jpg)
+
+Now let's look at this algorithm's performance measures.
+
+**Time complexity**
+
+It is O(b^(d+1)), where b is the branching factor and d is the depth of the shallowest goal. This is an exponential complexity, which grows very quickly.
+
+**Space complexity**
+
+The algorithm keeps every generated node in memory, so the space complexity is also O(b^(d+1)).
+
+**Completeness**
+
+BFS is complete, provided the branching factor b is finite and the shallowest goal is at a finite depth.
+
+**Optimality**
+
+It is not optimal in general, but if the path cost is a non-decreasing function of depth (for example, every step costs 1), BFS is optimal.
+
+This algorithm clearly has pros and cons. Its benefit is that it is accurate and easy to implement. On the other hand, because the number of generated nodes grows exponentially with depth, a goal at even a moderate depth (say, depth 21) could take hundreds of years to find.
+
+As the example below shows, the algorithm searches every node in one level and then goes on to the next level.
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/breadth-first-search.png)
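+
+A minimal Python sketch of this idea is shown below. The graph representation and the names `bfs`, `graph`, `start`, and `goal` are hypothetical (an adjacency-list dictionary mapping each node to a list of its neighbours); the function returns a shallowest path as a list of nodes, or `None` if the goal is unreachable.
+
+```python
+from collections import deque
+
+def bfs(graph, start, goal):
+    """Breadth-first search on an adjacency-list dict; returns a shallowest path or None."""
+    if start == goal:
+        return [start]
+    frontier = deque([start])          # FIFO queue of discovered but unexpanded nodes
+    parent = {start: None}             # also acts as the visited set
+    while frontier:
+        node = frontier.popleft()      # always expand the shallowest unexpanded node
+        for successor in graph.get(node, []):
+            if successor not in parent:
+                parent[successor] = node
+                if successor == goal:  # goal test when the node is generated
+                    path = [successor]
+                    while parent[path[-1]] is not None:
+                        path.append(parent[path[-1]])
+                    return path[::-1]
+                frontier.append(successor)
+    return None                        # the goal is not reachable from start
+
+# Example on a small hypothetical graph:
+# bfs({'A': ['B', 'C'], 'B': ['D'], 'C': [], 'D': []}, 'A', 'D')  ->  ['A', 'B', 'D']
+```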
+
+## Uniform-Cost Search
+
+Uniform-Cost Search (**UCS**) is a variant of Dijkstra's algorithm. Here, instead of inserting all vertices into a priority queue at the start, we insert only the source and then insert other vertices one by one as they are discovered. In every step, we check whether the item is already in the priority queue (using a visited array); if it is, we perform a decrease-key operation, otherwise we insert it.
+This variant of Dijkstra's algorithm is useful for infinite graphs and for graphs that are too large to represent in memory. Uniform-Cost Search is mainly used in Artificial Intelligence.
+
+|Step|Description|Shape|
+|----|-----------|-----|
+|0|Example graph |![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_1.jpg)|
+|1|First, we just have the source node in the queue |![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_2.jpg)|
+|2|Then, we add its children to the priority queue with their cumulative distance as priority |![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_3.1.jpg)|
+|3|Now, A has the minimum distance (i.e., the highest priority), so it is extracted from the queue. Since A is not the destination, its children are added to the priority queue (in the previous step, node 1 had the minimum cost).|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_3.jpg)|
+|4|B has the highest priority now, so its children are added to the queue|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_4.jpg)|
+|5|Up next, G will be removed and its children will be added to the queue|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_5.jpg)|
+|6|C and I have the same distance, so we remove them in alphabetical order|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_6.jpg)|
+|7|Next, we remove I; however, I has no further children, so there is no update to the queue. After that, we remove D. D only has one child, E, with a cumulative distance of 10. However, E already exists in our queue with a smaller distance, so we do not add it again. The next minimum distance is that of E, so that is what we remove next|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_7.jpg)|
+|8|Now, the minimum cost is F, so it will be removed and its child (J) will be added|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_8.jpg)|
+|9|After this, H has the minimum cost, so it will be removed, but it has no children to be added|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_9.jpg)|
+|10|Finally, we remove the Dest node, check that it is our target, and stop the algorithm. The minimum distance between the source and destination nodes (i.e., 8) has been found.|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/UCS_1.jpg)|
+
+### Performance Measure:
+
+**Completeness**
+
+* Yes, provided every step cost is at least some small positive constant ε. Otherwise, the search can get stuck in an infinite loop if there is a path with an infinite sequence of zero-cost actions.
+
+**Time complexity**
+
+Let C* be the cost of the optimal solution, and ε the minimum cost of a single step. Then the number of steps along an optimal path is at most C*/ε.
+Hence, the worst-case time complexity of Uniform-cost search is O( b^(C*/ε) ).
+
+**Space complexity**
+
+The same logic applies to space: the frontier can contain every node whose path cost is at most the cost of the optimal solution, which is
+> O( b^⌈C*/ε⌉ )
+
+**Optimality**
+
+* Uniform-cost search is optimal. This is because, at every step, the path with the least cost is chosen, and path costs never decrease as nodes are added, ensuring that the search expands nodes in order of their optimal path cost.
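+
+A compact Python sketch of UCS follows, using `heapq` as the priority queue. The graph representation and the names `uniform_cost_search`, `graph`, `start`, and `goal` are hypothetical (a dictionary mapping each node to a list of `(neighbour, edge_cost)` pairs); the function returns the cheapest cost and path, or `None` if the goal is unreachable.
+
+```python
+import heapq
+
+def uniform_cost_search(graph, start, goal):
+    """UCS on a weighted adjacency list {node: [(neighbour, edge_cost), ...]}.
+    Returns (cost, path) of a cheapest path to goal, or None if unreachable."""
+    frontier = [(0, start, [start])]   # priority queue ordered by cumulative path cost
+    best_cost = {start: 0}
+    while frontier:
+        cost, node, path = heapq.heappop(frontier)
+        if node == goal:               # goal test at expansion time keeps UCS optimal
+            return cost, path
+        if cost > best_cost.get(node, float("inf")):
+            continue                   # stale queue entry; a cheaper path was found later
+        for successor, step_cost in graph.get(node, []):
+            new_cost = cost + step_cost
+            if new_cost < best_cost.get(successor, float("inf")):
+                best_cost[successor] = new_cost
+                heapq.heappush(frontier, (new_cost, successor, path + [successor]))
+    return None
+
+# Example on a small hypothetical weighted graph:
+# uniform_cost_search({'S': [('A', 1), ('B', 5)], 'A': [('B', 2)], 'B': []}, 'S', 'B')
+# -> (3, ['S', 'A', 'B'])
+```
+
+Note that the goal test happens when a node is popped from the queue (i.e., expanded), not when it is generated; this is what guarantees that the returned path is optimal.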
+
+## Depth-first search
+
+Depth-first search (**DFS**) is an algorithm for traversing or searching tree or graph data structures. The algorithm starts at the root node (selecting some arbitrary node as the root in the case of a graph) and explores as far as possible along each branch before backtracking.
+
+**Depth First Search Example**
+
+Let's start with a simple example: an undirected graph with 5 vertices. (Look at the table below and follow each step.)
+
+|Step|Description|Shape|
+|----|-----------|-----|
+|0|Look at this example graph.|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/graph-dfs-step-0.webp)|
+|1|We start from vertex 0: the DFS algorithm puts it in the visited list and pushes all its adjacent vertices onto the stack (adjacent vertices = {1, 2, 3}).|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/graph-dfs-step-1.webp)|
+|2|Next, we visit the element at the top of the stack, i.e., 1, and go to its adjacent nodes. Since 0 has already been visited, we visit 2 instead.|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/graph-dfs-step-2.webp)|
+|3|Vertex 2 has an unvisited adjacent vertex, 4, so we add it to the top of the stack and visit it (vertex 4 is now above vertex 3 in the stack).|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/graph-dfs-step-3.webp)|
+|4|Continue as in step 3.|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/graph-dfs-step-4.webp)|
+|5|After we visit the last element, 3, it has no unvisited adjacent nodes, so we have completed the depth-first traversal of the graph.|![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/graph-dfs-step-5.webp)|
+
+For more clarification, the animation below shows an example from start to end (look at the gif).
+
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/Depth-First-Search.gif)
+
+### Performance Measure:
+
+**Completeness**
+
+It depends on the search space:
+* If the search space is finite, then Depth-First Search is complete, as it will expand every node within a limited search tree.
+* If there are infinitely many alternatives, it might not find a solution.
+
+**Time complexity**
+
+(Equivalent to the number of nodes traversed by DFS, where b is the branching factor and m is the maximum depth.)
+> T(b) = 1 + b + b^2 + b^3 + ... + b^m = O(b^m)
+
+**Space complexity**
+
+(Equivalent to how large the fringe can get.)
+> O(bm)
+
+**Optimality**
+
+DFS is not optimal: the solution it returns may involve many more steps, or a much higher cost, than necessary.
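+
+Below is a minimal iterative Python sketch of this traversal, using an explicit stack in place of BFS's queue. The adjacency list is a hypothetical encoding that resembles the pictured 5-vertex graph, so the printed order is only one possible depth-first order.
+
+```python
+def dfs(graph, start):
+    """Iterative depth-first traversal; returns the order in which vertices are visited."""
+    stack = [start]                    # LIFO stack instead of BFS's FIFO queue
+    visited = []
+    while stack:
+        node = stack.pop()
+        if node in visited:
+            continue
+        visited.append(node)
+        # push neighbours in reverse so the lowest-numbered neighbour is explored first
+        for successor in reversed(graph.get(node, [])):
+            if successor not in visited:
+                stack.append(successor)
+    return visited
+
+# Hypothetical adjacency list resembling the example above:
+example = {0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 4], 3: [0], 4: [2]}
+print(dfs(example, 0))   # one possible order: [0, 1, 2, 4, 3]
+```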
+
+## Depth-limited Search
+
+The depth-limited search (DLS) method is almost identical to depth-first search (DFS), but DLS can also handle infinite state spaces because it bounds the depth of the search tree with a predetermined limit L. Nodes at this depth limit are treated as if they had no successors.
+
+Depth-limited search can terminate with two kinds of failure:
+
+* Standard failure value: indicates that the problem has no solution at all.
+* Cutoff failure value: indicates that there is no solution within the given depth limit.
+
+Look at the example:
+
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/depth-limited-search-algorithm.png)
+
+First we choose the depth limit up to which nodes will be expanded; in this example it starts at zero. We expand nodes with the DFS strategy and check whether each one is a goal state. If the goal is not found, we return to the start node and search again, this time expanding nodes one level deeper. We proceed in this way until we reach the desired state.
+
+### Performance Measure:
+
+**Completeness**
+
+The depth limit introduces another problem: if we choose l < d (the limit is smaller than the depth of the shallowest goal), DLS will never reach a goal, so DLS is not complete.
+
+**Time Complexity**
+
+>Time complexity of the DLS algorithm is O(b^l)
+
+**Space Complexity**
+
+>Space complexity of the DLS algorithm is O(bl)
+
+**Optimality**
+
+One can view DFS as a special case of DLS in which l = infinity.
+DLS is not optimal even if l > d.
+
+## Iterative deepening depth-first search
+
+The iterative deepening algorithm is a combination of DFS and BFS. This search algorithm finds a suitable depth limit by gradually increasing the limit until a goal is found.
+
+It performs depth-first search up to a certain "depth limit", and it keeps increasing the depth limit after each iteration until the goal node is found.
+
+This search algorithm combines the benefits of breadth-first search's completeness (it finds the shallowest goal) and depth-first search's memory efficiency.
+
+Iterative deepening is the preferred uninformed search method when the search space is large and the depth of the goal node is unknown.
+
+**Example**
+
+Look at the example below.
+
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/IDDFS.jpg)
+
+Suppose that node M is the goal. We start with limit = 0 and check node A. Since A is not the goal, we add 1 to the limit and check nodes B and C, and we continue this way until we reach the goal.
+
+### Performance Measure:
+
+**Completeness**
+
+This algorithm is complete if the branching factor is finite.
+
+**Time Complexity**
+
+> T(b) = (d+1)b^0 + d·b^1 + (d−1)b^2 + ... + b^d = O(b^d)
+
+or more precisely
+
+> O( b^d (1 - 1/b)^(-2) )
+
+Note: because we want to avoid space problems, we do not store the results of earlier iterations, so some work is repeated; this does not hurt much, because the time complexity still remains O(b^d), similar to BFS.
+
+**Space Complexity**
+
+>The space complexity of IDDFS is O(bd), where b is the branching factor and d is the depth of the goal.
+
+Exactly as in DFS, only the nodes on the branch currently being expanded (together with their not-yet-expanded siblings) are stored. Since the depth of that branch is at most d, the space needed is O(bd).
+
+**Optimality**
+
+>IDDFS is optimal if the path cost is a non-decreasing function of depth (e.g. every step costs 1).
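+
+The sketch below shows one way DLS and IDDFS from the two preceding sections fit together in Python. It is a minimal tree-search version under assumed names (`depth_limited_search`, `iterative_deepening_search`, an adjacency-list `graph`) and does no cycle checking, so it is only a sketch, not the definitive implementation.
+
+```python
+CUTOFF = object()   # sentinel distinguishing "hit the depth limit" from "no solution"
+
+def depth_limited_search(graph, node, goal, limit, path=None):
+    """Recursive DLS; returns a path to goal, CUTOFF if the limit was hit, or None."""
+    path = [node] if path is None else path + [node]
+    if node == goal:
+        return path
+    if limit == 0:
+        return CUTOFF                  # treat this node as if it had no successors
+    cutoff_occurred = False
+    for successor in graph.get(node, []):
+        result = depth_limited_search(graph, successor, goal, limit - 1, path)
+        if result is CUTOFF:
+            cutoff_occurred = True
+        elif result is not None:
+            return result
+    return CUTOFF if cutoff_occurred else None
+
+def iterative_deepening_search(graph, start, goal, max_depth=50):
+    """Run DLS with limits 0, 1, 2, ... until a solution is found or max_depth is reached."""
+    for limit in range(max_depth + 1):
+        result = depth_limited_search(graph, start, goal, limit)
+        if result is not CUTOFF:
+            return result              # either a path, or None (standard failure)
+    return None
+
+# Example on a small hypothetical graph:
+# iterative_deepening_search({'A': ['B', 'C'], 'B': ['D'], 'C': [], 'D': []}, 'A', 'D')
+# -> ['A', 'B', 'D']
+```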
+
+## Bidirectional search
+This algorithm is another way of searching a graph or tree. It is derived from BFS, with the difference that it runs two simultaneous searches, one forward from the start node and one backward from the goal, and proceeds until the two frontiers intersect. It is much faster than plain BFS, and it is most appropriate when the goal state is defined explicitly and the branching factor is the same in both directions. For example, in the graph below, if we start from nodes 0 and 14, the two searches meet at node 7 and the path is found.
+
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/bidir.png)
+
+Now let's talk about its features and performance measures.
+
+**Completeness**
+
+It is complete (when both directions use BFS and the branching factor is finite).
+
+**Optimality**
+
+Like BFS, it is not optimal in general, but if the cost of every step is equal to one, this algorithm is optimal.
+
+**Time and Space Complexity**
+
+It keeps the frontier queues and expanded nodes of both searches in memory, so its space complexity is O(b^(d/2)). Its time complexity is also O(b^(d/2)): each of the two searches only has to go half as deep as a single BFS would, and this exponential saving is what makes the algorithm so attractive.
+
+## Conclusion
+The algorithms discussed above are the most widely used and practical uninformed search algorithms in academia and industry. They are still being developed and extended, and in the near future we will likely have even faster and more efficient variants.
+
+![](https://github.com/mohsenosl99/notes/blob/master/notebooks/2_uninformed_search/images/Conclusion.jpg)
+
+## References
+
+- Artificial Intelligence: A Modern Approach, Russell & Norvig (Third Edition)
+- www.geeksforgeeks.org
+- www.javatpoint.com/ai-uninformed-search-algorithms
diff --git a/notebooks/2_uninformed_search/matadata.yml b/notebooks/2_uninformed_search/matadata.yml
new file mode 100644
index 00000000..8cf6ba1a
--- /dev/null
+++ b/notebooks/2_uninformed_search/matadata.yml
@@ -0,0 +1,34 @@
+title: LN | Uninformed Search # shown on browser tab
+
+header:
+  title: Uninformed Search # title of your notebook
+  description: We write about uninformed search.
+
+authors:
+  label:
+    position: top
+  content:
+    # list of notebook authors
+    - name: Mohsen Osooli # name of author
+      role: Author # change this if you want
+      contact:
+        # list of contact information
+        - link: https://github.com/mohsenosl99
+          icon: fab fa-github
+        # optionally add other contact information like
+        # - link: # contact link
+        #   icon: # awesomefont tag for link (check: https://fontawesome.com/v5.15/icons)
+    - name: Pooria Shahmiri # name of author
+      role: Author # change this if you want
+      contact:
+        # list of contact information
+        - link: https://github.com/pouriashm
+          icon: fab fa-github
+
+comments:
+  # enable comments for your post
+  label: false
+  kind: comments
diff --git a/notebooks/2_uninformed_search/metadata.yml b/notebooks/2_uninformed_search/metadata.yml
deleted file mode 100644
index 2b41c0ad..00000000
--- a/notebooks/2_uninformed_search/metadata.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-title: LN | Uninformed Search
-
-header:
-  title: Uninformed Search
-
-authors:
-  label:
-    position: top
-    text: Authors
-    kind: people
-  content:
-    - name: Fatemeh Khashei
-      role: Author
-      contact:
-        - icon: fab fa-github
-          link: https://github.com/fatteme
-        - icon: fas fa-envelope
-          link: mailto:fatt3me@gmail.com
-
-    - name: Hossein Sobhi
-      role: Author
-      contact:
-        - icon: fas fa-envelope
-          link: mailto:hoseinsobhi@gmail.com
-
-    - name: Ali Asghar Ghanati
-      role: Author
-      contact:
-        - icon: fas fa-envelope
-          link: mailto:a.a.ghanati@gmail.com
-
-    - name: Zeinab Sadat Saghi
-      role: Supervisor
-      contact:
-        - icon: fab fa-github
-          link: https://github.com/atenasadat
-        - icon: fab fa-linkedin
-          link: https://www.linkedin.com/in/atena-saghi/
-        - icon: fas fa-envelope
-          link: mailto:atenasaghi@ce.sharif.edu