AI Lab Manual

Download as pdf or txt
Download as pdf or txt
You are on page 1of 24

Program 1

Object:- Write a python program to implement Breadth First Search Traversal.


Source Code:-
import collections
# BFS algorithm
def bfs(graph, root):
    """Breadth-first traversal of *graph* starting from *root*.

    Args:
        graph: dict mapping a vertex to the list of its neighbours.
        root: starting vertex (must be a key of *graph*).

    Prints each vertex in BFS order followed by a space, all on one line.
    Returns None.
    """
    visited, queue = set(), collections.deque([root])
    visited.add(root)

    while queue:
        # Dequeue a vertex from queue and emit it
        vertex = queue.popleft()
        print(str(vertex) + " ", end="")

        # If not visited, mark it as visited, and enqueue it
        for neighbour in graph[vertex]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)

if __name__ == '__main__':
    # Sample directed graph: vertex -> adjacency list
    graph = {0: [1, 2], 1: [2], 2: [3], 3: [1, 2]}
    print("Following is Breadth First Traversal: ")
    bfs(graph, 0)

1
Output:-

2
Program 2
Object:- Write a python program to implement the Water Jug Problem.
Source Code:-
from collections import deque
def waterjug(x, y, z):
    """Solve the two-jug measuring problem with breadth-first search.

    Args:
        x: capacity of jug 1.
        y: capacity of jug 2.
        z: target amount to obtain in jug 1.

    Prints the reached state, step count, and move list, and returns True
    when jug 1 holds exactly *z*; prints a failure message and returns
    False when no sequence of moves can reach the target.
    """
    visited = set()
    # State: (amount in jug 1, amount in jug 2, steps taken, moves so far)
    queue = deque([(0, 0, 0, [])])
    while queue:
        a, b, step, path = queue.popleft()
        if a == z:
            print(f"The waterjug is got ({a},{b})")
            print(f"Step :{step}")
            print(f"Path :{path}")
            return True
        if (a, b) in visited:
            continue
        visited.add((a, b))
        possible_moves = [
            (x, b, step + 1, path + ["Fill Jug 1"]),
            (a, y, step + 1, path + ["Fill Jug 2"]),
            (0, b, step + 1, path + ["Empty Jug 1"]),
            (a, 0, step + 1, path + ["Empty Jug 2"]),
            # Pour jug 1 into jug 2 — limited by water in 1 and space in 2
            (a - min(a, y - b), b + min(a, y - b), step + 1, path + ["pour Jug 1 to Jug 2"]),
            # Pour jug 2 into jug 1 — limited by water in 2 and space in 1
            (a + min(b, x - a), b - min(b, x - a), step + 1, path + ["pour Jug 2 to Jug 1"]),
        ]
        for actions in possible_moves:
            if (actions[0], actions[1]) not in visited:
                queue.append(actions)
    print("No Possible solution")
    return False
# Driver: 4-litre and 3-litre jugs, target of 2 litres in jug 1.
x = 4
y = 3
z = 2
r = waterjug(x, y, z)

Output:-

4
Program 3
Object:- Write a python program to remove punctuation from a given string.
Source Code:-
# Characters to be stripped from the user's input.
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
my_str = input("Enter a string: ")

# remove punctuation from the string
no_punct = ""
for char in my_str:
    if char not in punctuations:
        no_punct = no_punct + char

# display the unpunctuated string
print(no_punct)

Output:-

5
Program 4
Object:- Write a python program to sort the words of a sentence in alphabetical order.
Source Code:-
my_str = input("Enter a string: ")

# breakdown the string into a list of words (lower-cased so the
# sort is case-insensitive)
words = [word.lower() for word in my_str.split()]

# sort the list alphabetically, in place
words.sort()

# display the sorted words, one per line
print("The sorted words of the sentence are:")
for word in words:
    print(word)

Output:-

6
Program 5
Object:- Write a program to implement Hangman game using python.
Source Code:-
import random

def select_word():
    """Return the secret word for this round, chosen at random."""
    words = ["python", "java", "kotlin", "javascript"]
    return random.choice(words)

def display_hangman(tries):
    """Return the gallows ASCII art for the given number of *tries* left.

    Index 6 (all tries remaining) is the empty gallows; index 0 is the
    complete figure (game over).
    NOTE(review): the original ``stages`` list was lost in extraction
    (replaced by a page number); this is the conventional 7-stage art,
    reconstructed — confirm against the original manual.
    """
    stages = [
        # 0: head, torso, both arms, both legs — out of tries
        """
           --------
           |      |
           |      O
           |     \\|/
           |      |
           |     / \\
           -
        """,
        # 1: head, torso, both arms, one leg
        """
           --------
           |      |
           |      O
           |     \\|/
           |      |
           |     /
           -
        """,
        # 2: head, torso, both arms
        """
           --------
           |      |
           |      O
           |     \\|/
           |      |
           |
           -
        """,
        # 3: head, torso, one arm
        """
           --------
           |      |
           |      O
           |     \\|
           |      |
           |
           -
        """,
        # 4: head and torso
        """
           --------
           |      |
           |      O
           |      |
           |      |
           |
           -
        """,
        # 5: head only
        """
           --------
           |      |
           |      O
           |
           |
           |
           -
        """,
        # 6: empty gallows — full tries remaining
        """
           --------
           |      |
           |
           |
           |
           |
           -
        """,
    ]
    return stages[tries]

def hangman():
    """Run one interactive game of Hangman on the console.

    Picks a secret word via select_word(), then repeatedly prompts the
    player for a single letter or a whole-word guess until the word is
    revealed or the 6 tries run out.
    """
    word = select_word()
    word_completion = "_" * len(word)  # revealed letters; '_' for unknown
    guessed = False
    guessed_letters = []
    guessed_words = []
    tries = 6

    print("Let's play Hangman!")
    print(display_hangman(tries))
    print(word_completion)
    print("\n")

    while not guessed and tries > 0:
        guess = input("Please guess a letter or word: ").lower()
        if len(guess) == 1 and guess.isalpha():
            # Single-letter guess
            if guess in guessed_letters:
                print("You already guessed the letter", guess)
            elif guess not in word:
                print(guess, "is not in the word.")
                tries -= 1
                guessed_letters.append(guess)
            else:
                print("Good job,", guess, "is in the word!")
                guessed_letters.append(guess)
                # Reveal every occurrence of the guessed letter
                word_as_list = list(word_completion)
                indices = [i for i, letter in enumerate(word) if letter == guess]
                for index in indices:
                    word_as_list[index] = guess
                word_completion = "".join(word_as_list)
                if "_" not in word_completion:
                    guessed = True
        elif len(guess) == len(word) and guess.isalpha():
            # Whole-word guess
            if guess in guessed_words:
                print("You already guessed the word", guess)
            elif guess != word:
                print(guess, "is not the word.")
                tries -= 1
                guessed_words.append(guess)
            else:
                guessed = True
                word_completion = word
        else:
            print("Not a valid guess.")
        print(display_hangman(tries))
        print(word_completion)
        print("\n")
    if guessed:
        print("Congrats, you guessed the word! You win!")
    else:
        print("Sorry, you ran out of tries. The word was " + word + ". Maybe next time!")

if __name__ == "__main__":
    hangman()

9
Output:-

10
Program 6
Object:- Write a program to implement Tic-Tac-Toe game using python.
Source Code:-
def print_board(board):
    """Print the 9-cell Tic-Tac-Toe *board* as a 3x3 grid.

    board: list of 9 one-character strings ("X", "O", or " "),
    indexed row-major from the top-left. Returns None.
    """
    print(f"{board[0]} | {board[1]} | {board[2]}")
    print("--+---+--")
    print(f"{board[3]} | {board[4]} | {board[5]}")
    print("--+---+--")
    print(f"{board[6]} | {board[7]} | {board[8]}")
    print()

def check_win(board, player):
    """Return True if *player* ("X" or "O") occupies any winning line."""
    # Check rows, columns, and diagonals
    win_conditions = [
        [0, 1, 2], [3, 4, 5], [6, 7, 8],  # Rows
        [0, 3, 6], [1, 4, 7], [2, 5, 8],  # Columns
        [0, 4, 8], [2, 4, 6]              # Diagonals
    ]
    for condition in win_conditions:
        if board[condition[0]] == board[condition[1]] == board[condition[2]] == player:
            return True
    return False

def check_tie(board):
    """Return True when every cell is filled (caller checks wins first)."""
    return " " not in board

# Function to play Tic-Tac-Toe
def play_tic_tac_toe():
    """Run an interactive two-player Tic-Tac-Toe game on the console.

    Alternates between players X and O, prompting each for a position
    1-9, until one wins or the board fills up (tie).
    """
    # Initialize an empty board
    board = [" "] * 9
    current_player = "X"  # X always goes first

    while True:
        print_board(board)
        print(f"Player {current_player}'s turn")

        while True:
            try:
                move = int(input(f"Choose a position (1-9): ")) - 1
                if move < 0:
                    # Guard: input "0" would otherwise index board[-1]
                    raise IndexError
                if board[move] == " ":
                    board[move] = current_player
                    break
                else:
                    print("This position is already taken. Choose another.")
            except (ValueError, IndexError):
                print("Invalid input. Please choose a number between 1 and 9.")

        if check_win(board, current_player):
            print_board(board)
            print(f"Player {current_player} wins!")
            break
        if check_tie(board):
            print_board(board)
            print("It's a tie!")
            break

        # Switch players
        current_player = "O" if current_player == "X" else "X"
if __name__ == "__main__":
    play_tic_tac_toe()

12
Output:-

13
Program 7
Object:- Write a python program to remove stop words from a given passage in a text file
using NLTK.
Source Code:-
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Ensure required NLTK packages are downloaded


nltk.download('punkt')      # Punkt tokenizer models, used by word_tokenize
nltk.download('stopwords')  # stop-word lists, including English

def remove_stopwords(file_path):
    """Read *file_path*, strip English stop words, and print both versions.

    Args:
        file_path: path to a plain-text file containing the passage.

    Prints the original passage followed by the cleaned passage; returns
    None. If the file does not exist, prints an error and returns early.
    """
    # Read the passage from the text file
    try:
        with open(file_path, 'r') as file:
            text = file.read()
    except FileNotFoundError:
        print("The specified file does not exist.")
        return

    # Tokenize the text into words
    words = word_tokenize(text)

    # Get the list of stop words for English
    stop_words = set(stopwords.words('english'))

    # Remove stop words (case-insensitive comparison)
    filtered_words = [word for word in words if word.lower() not in stop_words]

    # Join the filtered words into a cleaned passage
    cleaned_text = ' '.join(filtered_words)

    print("Original Passage:")
    print(text)
    print("\nCleaned Passage:")
    print(cleaned_text)

# Specify the path to the input text file
file_path = 'input.txt'  # Replace 'input.txt' with your file name
remove_stopwords(file_path)

Output:-

15
Program 8
Object:- Write a python program to implement stemming for a given sentence using NLTK.
Source Code:-
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

# Ensure required NLTK packages are downloaded


nltk.download('punkt')  # Punkt tokenizer models, used by word_tokenize

def perform_stemming(sentence):
    """Stem every word of *sentence* with PorterStemmer and print the result.

    Args:
        sentence: the text to stem.

    Prints the original sentence and the stemmed sentence; returns None.
    """
    # Initialize the PorterStemmer
    stemmer = PorterStemmer()

    # Tokenize the sentence into words
    words = word_tokenize(sentence)

    # Apply stemming to each word
    stemmed_words = [stemmer.stem(word) for word in words]

    # Join the stemmed words into a sentence
    stemmed_sentence = ' '.join(stemmed_words)

    print("Original Sentence:")
    print(sentence)
    print("\nStemmed Sentence:")
    print(stemmed_sentence)
# Input sentence
sentence = "The cats are running faster than the dogs, and they will be jumping again soon."
perform_stemming(sentence)

16
Output:-

17
Program 9
Object:- Write a python program to perform POS (Parts of Speech) tagging for the given
sentence using NLTK.
Source Code:-
import nltk

# Ensure required NLTK packages are downloaded


nltk.download('punkt')                        # tokenizer models for word_tokenize
nltk.download('averaged_perceptron_tagger')   # model used by nltk.pos_tag

def pos_tagging(sentence):
    """Tokenize *sentence* and print an NLTK POS tag for every token.

    Args:
        sentence: the text to tag.

    Prints one "word -> TAG" line per token; returns None.
    """
    # Tokenize the sentence into words
    words = nltk.word_tokenize(sentence)

    # Perform POS tagging
    pos_tags = nltk.pos_tag(words)

    print("Parts of Speech Tags:")
    for word, tag in pos_tags:
        print(f"{word} -> {tag}")

# Input sentence
sentence = "The quick brown fox jumps over the lazy dog."
pos_tagging(sentence)

18
Output:-

19
Program 10
Object:- Write a python program to implement lemmatization using NLTK.
Source Code:-
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

# Ensure required NLTK packages are downloaded


nltk.download('punkt')    # tokenizer models for word_tokenize
nltk.download('wordnet')  # WordNet lexical database used by the lemmatizer
nltk.download('omw-1.4')  # Open Multilingual Wordnet data WordNet depends on

def perform_lemmatization(sentence):
    """Lemmatize every word of *sentence* with WordNet and print the result.

    Args:
        sentence: the text to lemmatize.

    Prints the original sentence and the lemmatized sentence; returns None.
    """
    # Initialize the WordNetLemmatizer
    lemmatizer = WordNetLemmatizer()

    # Tokenize the sentence into words
    words = word_tokenize(sentence)

    # Apply lemmatization to each word (default part of speech: noun)
    lemmatized_words = [lemmatizer.lemmatize(word) for word in words]

    # Join the lemmatized words into a sentence
    lemmatized_sentence = ' '.join(lemmatized_words)

    print("Original Sentence:")
    print(sentence)
    print("\nLemmatized Sentence:")
    print(lemmatized_sentence)

20
# Input sentence
sentence = "The leaves are falling from the trees and children are playing in the park."
perform_lemmatization(sentence)

Output:-

21
Program 11
Object:- Write a python program for text classification of a given sentence using NLTK.
Source Code:-
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import movie_reviews
from nltk.classify import NaiveBayesClassifier
from nltk.classify.util import accuracy
from nltk.corpus import stopwords

# Ensure required NLTK packages are downloaded


nltk.download('punkt')          # tokenizer models for word_tokenize
nltk.download('stopwords')      # stop-word lists used by extract_features
nltk.download('movie_reviews')  # labelled corpus the classifier trains on

# Feature extraction function
def extract_features(words):
    """Build an NLTK bag-of-words feature dict from an iterable of tokens.

    Maps each token that is not an English stop word to True
    (case-insensitive stop-word comparison).
    """
    stop_words = set(stopwords.words('english'))
    return {word: True for word in words if word.lower() not in stop_words}

# Load the movie reviews dataset
def load_dataset():
    """Return (positive, negative) lists of (feature-dict, label) pairs.

    Labels are the strings 'Positive' and 'Negative'; features come from
    extract_features over each review in the movie_reviews corpus.
    """
    positive_features = [(extract_features(movie_reviews.words(fileid)), 'Positive')
                         for fileid in movie_reviews.fileids('pos')]
    negative_features = [(extract_features(movie_reviews.words(fileid)), 'Negative')
                         for fileid in movie_reviews.fileids('neg')]
    return positive_features, negative_features

# Train the Naive Bayes Classifier
def train_classifier():
    """Train a NaiveBayesClassifier on the movie reviews dataset.

    Uses the first 800 reviews of each class for training and the rest
    for evaluation. Returns (classifier, test_data).
    """
    positive_features, negative_features = load_dataset()
    train_data = positive_features[:800] + negative_features[:800]
    test_data = positive_features[800:] + negative_features[800:]
    classifier = NaiveBayesClassifier.train(train_data)
    return classifier, test_data

# Classify a given sentence
def classify_sentence(classifier, sentence):
    """Return the classifier's label for *sentence* ('Positive'/'Negative')."""
    words = word_tokenize(sentence)
    features = extract_features(words)
    return classifier.classify(features)

# Main function
def main():
    """Train the classifier, report accuracy, and classify a sample sentence."""
    classifier, test_data = train_classifier()

    # Test the classifier accuracy
    print(f"Classifier Accuracy: {accuracy(classifier, test_data) * 100:.2f}%")

    # Classify a sample sentence
    sample_sentence = "The plot was dull and the acting was terrible."
    classification = classify_sentence(classifier, sample_sentence)
    print(f"Input Sentence: \"{sample_sentence}\"")
    print(f"Classification: {classification}")

    # Show top features
    print("\nMost Informative Features:")
    classifier.show_most_informative_features(10)

if __name__ == "__main__":
    main()

23
Output:-

24

You might also like