Read a text file into a list, ignoring the newline (Enter) characters.
def readlist(file_name):
    """Read a text file and return a list of rows, one per line.

    Each line is stripped of surrounding whitespace (including the trailing
    newline) and split on commas.
    """
    with open(file_name, 'r') as f:
        return [line.strip().split(',') for line in f]


if __name__ == '__main__':
    # Demo call guarded so importing this module does not crash when
    # 'f.txt' is absent.
    readlist('f.txt')
All generations.
def readlist(file_name):
    """Split each line of *file_name* on commas and return the rows.

    Leading/trailing whitespace (including the newline) is stripped from
    every line before splitting.
    """
    with open(file_name, 'r') as f:
        rows = [row.strip().split(',') for row in f.readlines()]
    return rows


if __name__ == '__main__':
    # Guarded: 'f.txt' may not exist where this module is imported.
    readlist('f.txt')
import urllib.request
from bs4 import BeautifulSoup
# Print the first text node of every table cell on the Wikipedia page
# listing Indian state and union-territory capitals.
url = "https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India"
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
# Renamed loop variable: the outer loop iterates <table> elements, not rows,
# so the original name `tr` was misleading.
for table in soup.find_all('table'):
    for td in table.find_all('td'):
        # find(text=True) yields the first string child of the cell.
        print(td.find(text=True))
def read_html(filename='my.html'):
    """Return the entire contents of *filename* as a single string.

    The original hard-coded 'my.html' is kept as the default, so existing
    zero-argument callers are unaffected.
    """
    with open(filename) as f:
        return f.read()


if __name__ == '__main__':
    # Guarded: 'my.html' may not exist at import time.
    html = read_html()
def isChinese(word):
    """Return True if *word* lies in the CJK Unified Ideographs block
    (U+4E00..U+9FFF).

    NOTE(review): for multi-character strings the comparison is
    lexicographic on the whole string, effectively decided by the first
    character -- presumably a per-character check was intended.
    """
    return u'\u4e00' <= word <= u'\u9fff'


if __name__ == '__main__':
    isChinese('人')
def find_element(filename):
    """Parse an HTML file and return the href of every <a> nested directly
    under an <li> element.

    NOTE(review): relies on a module named ``html`` providing
    ``fromstring`` -- presumably ``from lxml import html`` -- which is not
    imported anywhere in this file; confirm the import before use.
    """
    with open(filename) as file:
        tree = html.fromstring(file.read())
    return tree.xpath('//li/a/@href')


if __name__ == '__main__':
    find_element('sample.html')
def is_japanese(char):
    """Return True if the single character *char* is a CJK Unified
    Ideograph (U+4E00..U+9FFF).

    NOTE(review): this range covers kanji/Chinese ideographs only -- kana
    are not matched, so the name overstates what is checked.
    """
    return 0x4e00 <= ord(char) <= 0x9fff


if __name__ == '__main__':
    is_japanese("日")
import re
def chinese_char(string):
    """Return True if *string* contains at least one CJK ideograph
    (U+4E00..U+9FFF)."""
    # re.search returns None on no match; `is not None` replaces the
    # non-idiomatic `match != None` comparison.
    return re.search(u'[\u4e00-\u9fff]+', string) is not None
def find_chinese(filename):
    """Return the Chinese characters found in *filename*, concatenated.

    Whitespace and common ASCII/full-width punctuation are stripped first;
    each remaining character is kept when langid classifies it as Chinese.

    NOTE(review): depends on the third-party ``langid`` module, which is
    never imported in this file -- confirm ``import langid`` exists at
    runtime.  Classifying single characters with langid is slow and
    unreliable; a regex range test would be cheaper.
    """
    # Use a context manager: the original leaked the file handle.
    with open(filename, "r") as f:
        text = f.read()
    text = re.sub("[\s+\.\!\/_,$%^*(+\"\'“”《》?“]+|[+——!,。?、~@#¥%……&*()]+", "", text)
    result = ""
    for ch in text:
        if langid.classify(ch)[0] == "zh":
            result += ch
    return result
def readIntoList(path=r'C:\Users\xxx\Desktop\xxx\xxx.txt'):
    """Read *path* and return its lines with newlines removed and runs of
    spaces partially collapsed.

    Bug fix: the original path literal was not raw, and ``'C:\\Users...'``
    contains ``\\U``, which is a SyntaxError in Python 3.  The path is kept
    as a default argument so the zero-argument call still works.
    """
    li = []
    with open(path, 'r') as f:
        for line in f:
            line = line.replace('\n', '')
            # Two passes of double-space -> single-space; presumably meant
            # to collapse space runs (runs wider than 4 may survive) --
            # kept from the original logic.
            line = line.replace('  ', ' ')
            line = line.replace('  ', ' ')
            li.append(line)
    # The explicit f.close() inside the with-block was redundant; the
    # context manager closes the file.
    return li
import codecs
def extract_paragraphs(src='kantai_collection.html', dst='kantai_collection.txt'):
    """Copy the text of every <p>...</p> in *src* into *dst*, one per line.

    The naive string splitting of the original is preserved: everything
    between a literal '<p>' and the next '</p>' is copied verbatim (nested
    tags inside a paragraph are not stripped).
    """
    with codecs.open(src, 'r', 'utf-8') as f:
        data = f.read()
    with codecs.open(dst, 'w', 'utf-8') as f2:
        for chunk in data.split('<p>')[1:]:
            f2.write(chunk.split('</p>')[0] + '\n')


if __name__ == '__main__':
    # Guarded: the original ran at import and crashed when the input file
    # was absent.
    extract_paragraphs()
def is_japanese(s):
    """Return True if *s* compares within U+4E00..U+9FAF (CJK ideographs).

    NOTE(review): for multi-character strings this is a lexicographic
    comparison on the whole string, effectively decided by the first
    character -- presumably a per-character check was intended.
    """
    return u'\u4e00' <= s <= u'\u9faf'


if __name__ == '__main__':
    is_japanese(u'涼太')
// TODO: implement HTML-file processing.  The original snippet was an
// anonymous `function(htmlFile) {}` at statement level, which is a syntax
// error; it is given a name here so the file parses.
function processHtmlFile(htmlFile) {
    // TODO
}
import re
from bs4 import BeautifulSoup
def get_japanese(html_file):
    """Return all text nodes in *html_file* containing a CJK ideograph.

    NOTE(review): bs4 deprecates the ``text=`` argument in favour of
    ``string=``; kept here for compatibility with older installs.
    """
    # Context manager: the original passed a bare open() handle to
    # BeautifulSoup and leaked it.
    with open(html_file) as f:
        soup = BeautifulSoup(f, 'html.parser')
    return soup.findAll(text=re.compile(".*[\u4E00-\u9FFF]+.*"))


if __name__ == '__main__':
    get_japanese("test.html")
import glob

# Print the name of every HTML file in the current working directory.
for html_name in glob.glob("*.html"):
    print(html_name)
def delete_words(words, filename):
    """Print every line of *filename* with all of *words* removed.

    The file itself is not modified; cleaned lines (still carrying their
    trailing newline) are printed one by one.
    """
    # with-statement closes the file even if an exception occurs; the
    # original only closed it on the happy path.
    with open(filename, 'r') as f:
        for line in f:
            for word in words:
                line = line.replace(word, '')
            print(line)
# Collect the text of every <h3> inside course-card containers.
# NOTE(review): `tree` is not defined anywhere in this file -- presumably an
# lxml HTML tree built elsewhere; confirm before use.
titles = tree.xpath("//div[@class='course-card-container']//h3/text()")
import re
def find_japanese(file):
    """Return the whitespace-separated tokens of *file* that contain at
    least one Japanese character.

    The character class covers CJK punctuation (U+3000-303F), hiragana,
    katakana, full/half-width forms, CJK ideographs and extension A.
    """
    japanese_regex = re.compile(r'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]')
    # Context manager: the original never closed the file handle.
    with open(file) as fh:
        tokens = fh.read().strip().split()
    return [tok for tok in tokens if japanese_regex.search(tok)]


if __name__ == '__main__':
    find_japanese('html_file.txt')
def image_to_string(img):
    """Extract the token between the first '_' and the following '.' of a
    file name, e.g. 'img_cat.png' -> 'cat'.

    Despite the name, no OCR happens; only the file-name string is parsed.
    Raises IndexError when the name contains no '_'.
    """
    return img.split("_")[1].split(".")[0]


if __name__ == '__main__':
    # NOTE(review): `image_list` is not defined in this file; guarded so the
    # module can be imported.
    image_to_string(image_list[0])
// function that: compare alots of number in a list, pickup the greast number
function findGreatestNum(numbers){
let greatestNum = numbers[0];
for (let num of numbers){
if (num > greatestNum){
greatestNum = num;
}
}
return greatestNum;
}
console.log(findGreatestNum([3,4,5,6,7,8,9,10]));
Script
function greast(numbers){
if(numbers.length === 0){
return 0
}
var max = numbers[0]
for(var i = 1; i < numbers.length; i++){
if(numbers[i] > max){
max = numbers[i]
}
}
return max
}
greast([2, 3, 1, 4, 5, 6, 7])
const compareNumbers = (a, b) => {
if (a > b) {
return a;
} else if (a < b) {
return b;
} else {
return `${a} = ${b}`;
}
}
compareNumbers(1, 2)
from bs4 import BeautifulSoup
import urllib.request
def get_soup(url):
    """Download *url* and return the page parsed as a BeautifulSoup tree."""
    page = urllib.request.urlopen(url).read()
    return BeautifulSoup(page, "html.parser")


if __name__ == '__main__':
    # Guarded: the original performed a network request at import time.
    soup = get_soup("https://www.pythonforbeginners.com/beautifulsoup/beautifulsoup-4-python")
from bs4 import BeautifulSoup
# Parse a local HTML file and extract its visible text.
with open('html_file.html') as file:
    # Specify a parser explicitly: without one, bs4 picks whatever parser
    # happens to be installed, which varies by machine and emits a warning.
    soup = BeautifulSoup(file.read(), 'html.parser')
text = soup.get_text()
def connect():
    """Open and return an IBM DB2 connection with empty credentials.

    NOTE(review): relies on the third-party ``ibm_db`` module and a
    module-level ``dsn`` connection string, neither of which is defined in
    this file -- confirm both exist at runtime.
    """
    return ibm_db.connect(dsn, "", "")
from bs4 import BeautifulSoup
import requests
import re
def get_text(soup):
    """Return the text of every <p> element in *soup* as a list."""
    return [p.get_text() for p in soup.find_all('p')]


if __name__ == '__main__':
    # Download a novel page from Aozora Bunko and dump its paragraphs to
    # text.txt, one per line.  Guarded: the original fetched the URL and
    # wrote the file at import time.
    r = requests.get('http://www.aozora.gr.jp/cards/000148/files/773_14547.html')
    soup = BeautifulSoup(r.content, 'lxml')
    with open('text.txt', 'w') as f:
        for text in get_text(soup):
            f.write(text + '\n')
# Use the text on the screen to click the target.
def clickWithText(text):
    """Click the screen region matching *text*; exit with status 1 when the
    text cannot be found.

    NOTE(review): ``exists``, ``click`` and ``exit`` are presumably SikuliX
    scripting builtins -- they are not defined in this file; confirm the
    execution environment.
    """
    if exists(text):
        click(text)
    else:
        exit(1)


if __name__ == '__main__':
    clickWithText("Next")
def baidu_search(text):
    """Type *text* into Baidu's search box ("kw") and click the search
    button ("su").

    NOTE(review): depends on a module-level Selenium ``driver`` not defined
    in this file; ``find_element_by_id`` is removed in Selenium 4 -- use
    ``driver.find_element(By.ID, ...)`` there.
    """
    driver.find_element_by_id("kw").send_keys(text)
    driver.find_element_by_id("su").click()


if __name__ == '__main__':
    baidu_search("Hao")
from bs4 import BeautifulSoup
def read_html(file):
    """Parse *file* with the lxml backend and return all elements carrying
    the CSS class 'jp'."""
    with open(file) as f:
        soup = BeautifulSoup(f, 'lxml')
    return soup.find_all(class_='jp')


if __name__ == '__main__':
    # Guarded: 'japanese.html' may not exist at import time.
    read_html('japanese.html')
def write_numbers(path='/Users/user/myfile.txt', start=1, stop=11):
    """Write the integers ``start .. stop-1``, one per line, to *path*.

    The original hard-coded user path and range 1..10 are kept as defaults.
    """
    with open(path, 'w') as file:
        for i in range(start, stop):
            file.write('{}\n'.format(i))


if __name__ == '__main__':
    # Guarded: the original wrote to a user-specific absolute path at
    # import time, which fails on most machines.
    write_numbers()
function greatest(numbers) {
let greatest = 0;
numbers.forEach(function (n) {
if (n > greatest) {
greatest = n;
}
});
return greatest;
}
greatest([1, 2, 34, 5, 6, 7, 9, 10])
// NOTE(review): `clickElementWithText` is not defined in this file --
// presumably a UI-automation helper; confirm where it comes from.
clickElementWithText("text");
def read_image(image_file):
    '''
    Placeholder OCR stub: despite the name, this performs no image reading
    and simply returns its argument unchanged.  (The original docstring
    claimed it extracted text from the image, which the code does not do.)
    '''
    return image_file
def is_chinese(uchar):
    """Return True if *uchar* is a CJK Unified Ideograph
    (U+4E00..U+9FFF)."""
    return '\u4e00' <= uchar <= '\u9fff'
# coding: UTF-8
import re
def find_chinese_characters(file_path):
    """Return every maximal run of CJK ideographs (U+4E00..U+9FFF) found in
    *file_path*."""
    # Read explicitly as UTF-8: the platform's default locale encoding may
    # not be able to decode Chinese text everywhere.
    with open(file_path, 'r', encoding='utf-8') as file:
        text = file.read()
    return re.findall(u'[\u4e00-\u9fff]+', text)


if __name__ == '__main__':
    # Guarded: 'text.txt' may not exist at import time.
    print(find_chinese_characters('text.txt'))
def is_japanese_character(character):
    """Return True if *character* is in the CJK ideograph block
    U+4E00..U+9FFF (kanji; kana are NOT covered despite the name)."""
    return 0x4E00 <= ord(character) <= 0x9FFF


if __name__ == '__main__':
    is_japanese_character("中")
from bs4 import BeautifulSoup
import re
from urllib.request import urlopen
def export(url):
    """Download *url* and write the text of every <div class="text"> to
    japanese.txt, each block followed by a blank line."""
    soup = BeautifulSoup(urlopen(url), 'html.parser')
    blocks = soup.find_all('div', {'class': 'text'})
    with open('japanese.txt', 'w') as f:
        for block in blocks:
            f.write(block.get_text())
            f.write('\n\n')
def read_html(folder):
    """Print the contents of every .html file directly inside *folder*."""
    for filename in os.listdir(folder):
        if filename.endswith(".html"):
            # Bug fix: os.path.join replaces naive `folder + filename`,
            # which broke whenever *folder* lacked a trailing separator.
            with open(os.path.join(folder, filename), "r") as f:
                print(f.read())
# Bug fix: the package is imported as ``bs4`` -- ``import beautifulsoup4``
# is not a valid module name and raises ImportError.
from bs4 import BeautifulSoup


def read_html(file):
    """Read an HTML file and return it parsed as a BeautifulSoup tree."""
    # Context manager: the original never closed the file handle.
    with open(file) as f:
        return BeautifulSoup(f.read(), 'html.parser')


if __name__ == '__main__':
    read_html("test.html")
import selenium.webdriver

if __name__ == '__main__':
    # Guarded: launching a real browser is a heavy side effect that should
    # not happen on import.
    driver = selenium.webdriver.Chrome()
    driver.get('https://www.python.org/')
    # NOTE(review): find_element_by_class_name is removed in Selenium 4 --
    # use driver.find_element(By.CLASS_NAME, 'search-field') there.
    element = driver.find_element_by_class_name('search-field')
    element.get_attribute('style')
def is_chinese(uchar):
    """Return True if *uchar* is a CJK Unified Ideograph.

    >>> is_chinese('一')
    True
    >>> is_chinese('A')
    False
    """
    return u'\u4e00' <= uchar <= u'\u9fff'
import os
def read_files(path):
    """Return the names of the entries in directory *path*.

    Despite the name, nothing is read from the files themselves; this is a
    plain, non-recursive directory listing.
    """
    return os.listdir(path)


if __name__ == '__main__':
    read_files('./')
// Load a local HTML file and (attempt to) scrape a kanji code table.
const fs = require('fs');
const path = require('path');
const html = fs.readFileSync(path.join(__dirname, 'japan.html'), 'utf8');
const puppeteer = require('puppeteer');
const cheerio = require('cheerio');
// Parsed but never used below -- presumably intended for the local file.
const $ = cheerio.load(html);
// Starting code points for two 96-character runs; 0x3040 is the start of
// the hiragana block -- the meaning of 0x3097 is unclear, verify.
const japanCharCodes = [0x3040, 0x3097];
const japanChars = [];
japanCharCodes.forEach(japanCharCode => {
for (let i = 0; i < 96; i++) {
japanChars.push(String.fromCodePoint(japanCharCode + i));
}
});
// NOTE(review): top-level `await` requires an ES-module context; with
// CommonJS require() above this does not parse -- wrap in an async main().
// The snippet is also truncated mid-callback at the final line.
const browser = await puppeteer.launch({
headless: false,
});
const page = await browser.newPage();
await page.goto('http://www.rikai.com/library/kanjitables/kanji_codes.unicode.shtml');
await page.on('console', async msg => {
def get_missing_elements(driver):
    """Return (and print) the page-source markup left over after removing
    the outerHTML of every element Selenium can see -- i.e. markup that did
    not map to a WebElement.

    NOTE(review): str.replace removes *all* occurrences, so repeated
    fragments may be over-removed; treat the result as a heuristic.
    """
    web_elements = driver.find_elements_by_xpath("//*")
    html_elements = driver.page_source
    for element in web_elements:
        html_elements = html_elements.replace(element.get_attribute('outerHTML'), '')
    print(html_elements)
    return html_elements
def delete_words(words, file):
    """Remove every occurrence of each word in *words* from *file*, in place.

    Bug fix: the original re-opened the same file in append mode once per
    line while iterating it, leaking one handle per line and appending the
    "cleaned" lines after the originals instead of replacing them.  The
    file is now read fully, then rewritten.
    """
    with open(file, 'r', encoding='utf8') as f:
        lines = f.readlines()
    with open(file, 'w', encoding='utf8') as f:
        for line in lines:
            for word in words:
                line = line.replace(word, '')
            f.write(line)
def echo_file(fname):
    """Print the contents of *fname* to stdout.

    Bug fix: each iterated line keeps its trailing newline, so a bare
    print() doubled every line break; suppress print's own newline.
    """
    with open(fname) as f:
        for line in f:
            print(line, end='')


if __name__ == '__main__':
    # Guarded: the placeholder path does not exist at import time.
    echo_file('path/to/file')
Generate
More than just a code generator. A tool that helps you with a wide range of tasks. All in one place.
Function from Description
Text Description to SQL Command
Translate Languages
Generate HTML from Description
Code to Explanation
Fix invalid Code
Get Test for Code
Class from Description
Regex from Description
Regex to Explanation
Git Command from Description
Linux Command
Function from Docstring
Add typing to code
Get Language from Code
Time complexity
CSS from Description
Meta Tags from Description